From 63f714408022199b8821e5327ebc8d9c4b607d61 Mon Sep 17 00:00:00 2001 From: Francesco Capuano Date: Wed, 23 Apr 2025 12:59:10 +0200 Subject: [PATCH 01/88] fix: sharing predicted chunk with user --- lerobot/common/policies/act/modeling_act.py | 31 +++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/lerobot/common/policies/act/modeling_act.py b/lerobot/common/policies/act/modeling_act.py index 72d4df03a2..2623e16553 100644 --- a/lerobot/common/policies/act/modeling_act.py +++ b/lerobot/common/policies/act/modeling_act.py @@ -142,6 +142,37 @@ def select_action(self, batch: dict[str, Tensor]) -> Tensor: self._action_queue.extend(actions.transpose(0, 1)) return self._action_queue.popleft() + @torch.no_grad + def predict_chunk(self, batch: dict[str, Tensor]) -> Tensor: + """Predict a chunk of actions given environment observations. + + This method returns the raw chunk of actions predicted by the model without + any queue management or action consumption logic. + + Args: + batch: A dictionary of observation tensors. + + Returns: + A tensor of shape (batch_size, chunk_size, action_dim) containing predicted actions. + """ + self.eval() + + batch = self.normalize_inputs(batch) + if self.config.image_features: + batch = dict(batch) # shallow copy so that adding a key doesn't modify the original + batch["observation.images"] = [batch[key] for key in self.config.image_features] + + # If we are using temporal ensembling + if self.config.temporal_ensemble_coeff is not None: + actions = self.model(batch)[0] # (batch_size, chunk_size, action_dim) + actions = self.unnormalize_outputs({"action": actions})["action"] + return actions + + # Standard action prediction + actions = self.model(batch)[0] + actions = self.unnormalize_outputs({"action": actions})["action"] + return actions + def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, dict]: """Run the batch through the model and compute the loss for training or validation.""" batch = self.normalize_inputs(batch) From 9020109d294fc5e2a5cd85f257b2430aeee81773 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 23 Apr 2025 21:53:09 +0200 Subject: [PATCH 02/88] [pre-commit.ci] pre-commit autoupdate (#1011) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a778ce0e9e..e5fc2e9205 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -48,7 +48,7 @@ repos: - id: pyupgrade - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.11.5 + rev: v0.11.6 hooks: - id: ruff args: [--fix] @@ -62,7 +62,7 @@ repos: - id: gitleaks - repo: https://github.com/woodruffw/zizmor-pre-commit - rev: v1.5.2 + rev: v1.6.0 hooks: - id: zizmor From da8bec015352ec2217a98729e62be1ac68db5252 Mon Sep 17 00:00:00 2001 From: Simon Alibert <75076266+aliberts@users.noreply.github.com> Date: Thu, 24 Apr 2025 09:26:47 +0200 Subject: [PATCH 03/88] Revert "[pre-commit.ci] pre-commit autoupdate" (#1025) --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e5fc2e9205..a778ce0e9e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -48,7 +48,7 @@ repos: - id: pyupgrade - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.11.6 + rev: v0.11.5 hooks: - id: ruff args: [--fix] @@ 
-62,7 +62,7 @@ repos: - id: gitleaks - repo: https://github.com/woodruffw/zizmor-pre-commit - rev: v1.6.0 + rev: v1.5.2 hooks: - id: zizmor From 309decabbd88908c3785b9ee96433dc14e4f3f94 Mon Sep 17 00:00:00 2001 From: Adil Zouitine Date: Thu, 24 Apr 2025 09:42:03 +0200 Subject: [PATCH 04/88] fix(ci): Pin draccus (<0.10.0) and torch (<2.7) to fix pipeline (#1022) Co-authored-by: imstevenpmwork Co-authored-by: Simon Alibert <75076266+aliberts@users.noreply.github.com> --- pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 4b858634de..0ba30c9cb7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -49,7 +49,7 @@ dependencies = [ "datasets>=2.19.0", "deepdiff>=7.0.1", "diffusers>=0.27.2", - "draccus>=0.10.0", + "draccus==0.10.0", "einops>=0.8.0", "flask>=3.0.3", "gdown>=5.1.0", @@ -68,7 +68,7 @@ dependencies = [ "pyzmq>=26.2.1", "rerun-sdk>=0.21.0", "termcolor>=2.4.0", - "torch>=2.2.1", + "torch>=2.2.1,<2.7", "torchcodec>=0.2.1; sys_platform != 'win32' and (sys_platform != 'linux' or (platform_machine != 'aarch64' and platform_machine != 'arm64' and platform_machine != 'armv7l')) and (sys_platform != 'darwin' or platform_machine != 'x86_64')", "torchvision>=0.21.0", "wandb>=0.16.3", From 3ce6e22f18129e72b1c52dd0e2d0eb8fd499287e Mon Sep 17 00:00:00 2001 From: Adil Zouitine Date: Thu, 24 Apr 2025 12:16:02 +0200 Subject: [PATCH 05/88] fix(ci): Pin `torchcodec` (==0.2.1) to fix pipeline temporarly (#1030) --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 0ba30c9cb7..db3d8e21cf 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -69,7 +69,7 @@ dependencies = [ "rerun-sdk>=0.21.0", "termcolor>=2.4.0", "torch>=2.2.1,<2.7", - "torchcodec>=0.2.1; sys_platform != 'win32' and (sys_platform != 'linux' or (platform_machine != 'aarch64' and platform_machine != 'arm64' and platform_machine != 'armv7l')) and (sys_platform != 'darwin' or platform_machine != 'x86_64')", + "torchcodec==0.2.1; sys_platform != 'win32' and (sys_platform != 'linux' or (platform_machine != 'aarch64' and platform_machine != 'arm64' and platform_machine != 'armv7l')) and (sys_platform != 'darwin' or platform_machine != 'x86_64')", "torchvision>=0.21.0", "wandb>=0.16.3", "zarr>=2.17.0", From 2e5aab3ccac5c01b001e3ac693a3414e69c69ff4 Mon Sep 17 00:00:00 2001 From: Pepijn <138571049+pkooij@users.noreply.github.com> Date: Mon, 28 Apr 2025 09:00:32 +0200 Subject: [PATCH 06/88] Update tutorial (#1021) Co-authored-by: Simon Alibert <75076266+aliberts@users.noreply.github.com> --- README.md | 31 +- examples/10_use_so100.md | 26 +- examples/12_use_so101.md | 697 ++++++++++++++++++ lerobot/__init__.py | 1 + .../common/robot_devices/robots/configs.py | 63 ++ .../robots/feetech_calibration.py | 8 + .../robot_devices/robots/manipulator.py | 6 +- lerobot/common/robot_devices/robots/utils.py | 3 + media/so101/follower_middle.webp | Bin 0 -> 65416 bytes media/so101/follower_rest.webp | Bin 0 -> 41608 bytes media/so101/follower_rotated.webp | Bin 0 -> 46092 bytes media/so101/follower_zero.webp | Bin 0 -> 65314 bytes media/so101/leader_middle.webp | Bin 0 -> 35576 bytes media/so101/leader_rest.webp | Bin 0 -> 39650 bytes media/so101/leader_rotated.webp | Bin 0 -> 38470 bytes media/so101/leader_zero.webp | Bin 0 -> 30790 bytes media/so101/so101-leader.webp | Bin 0 -> 154650 bytes media/so101/so101.webp | Bin 0 -> 133522 bytes 18 files changed, 809 insertions(+), 26 deletions(-) create mode 100644 
examples/12_use_so101.md create mode 100644 media/so101/follower_middle.webp create mode 100644 media/so101/follower_rest.webp create mode 100644 media/so101/follower_rotated.webp create mode 100644 media/so101/follower_zero.webp create mode 100644 media/so101/leader_middle.webp create mode 100644 media/so101/leader_rest.webp create mode 100644 media/so101/leader_rotated.webp create mode 100644 media/so101/leader_zero.webp create mode 100644 media/so101/so101-leader.webp create mode 100644 media/so101/so101.webp diff --git a/README.md b/README.md index 3ca20147b0..946693350d 100644 --- a/README.md +++ b/README.md @@ -23,21 +23,35 @@

-

- Build Your Own SO-100 Robot!

+

+ Build Your Own SO-101 Robot!

- SO-100 leader and follower arms - -

Meet the SO-100 – Just $110 per arm!

+
+ SO-101 follower arm + SO-101 leader arm +
+ + +

Meet the updated SO-100, the SO-101 – Just €114 per arm!

Train it in minutes with a few simple moves on your laptop.

Then sit back and watch your creation act autonomously! 🤯

-

- Get the full SO-100 tutorial here.

+

+ See the full SO-101 tutorial here.

-

Want to take it to the next level? Make your SO-100 mobile by building LeKiwi!

+

Want to take it to the next level? Make your SO-101 mobile by building LeKiwi!

Check out the LeKiwi tutorial and bring your robot to life on wheels.

LeKiwi mobile robot @@ -51,7 +65,6 @@ --- - 🤗 LeRobot aims to provide models, datasets, and tools for real-world robotics in PyTorch. The goal is to lower the barrier to entry to robotics so that everyone can contribute and benefit from sharing datasets and pretrained models. 🤗 LeRobot contains state-of-the-art approaches that have been shown to transfer to the real-world with a focus on imitation learning and reinforcement learning. diff --git a/examples/10_use_so100.md b/examples/10_use_so100.md index 9dbe974c14..9385c7f575 100644 --- a/examples/10_use_so100.md +++ b/examples/10_use_so100.md @@ -445,18 +445,16 @@ For the leader configuration, perform **Steps 1–23**. Make sure that you remov ## E. Calibrate -Next, you'll need to calibrate your SO-100 robot to ensure that the leader and follower arms have the same position values when they are in the same physical position. This calibration is essential because it allows a neural network trained on one SO-100 robot to work on another. +Next, you'll need to calibrate your SO-100 robot to ensure that the leader and follower arms have the same position values when they are in the same physical position. +The calibration process is very important because it allows a neural network trained on one SO-100 robot to work on another. -#### a. Manual calibration of follower arm +#### Manual calibration of follower arm -> [!IMPORTANT] -> Contrarily to step 6 of the [assembly video](https://youtu.be/FioA2oeFZ5I?t=724) which illustrates the auto calibration, we will actually do manual calibration of follower for now. +You will need to move the follower arm to these positions sequentially, note that the rotated position is on the right side of the robot and you have to open the gripper fully. -You will need to move the follower arm to these positions sequentially: - -| 1. Zero position | 2. Rotated position | 3. Rest position | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| SO-100 follower arm zero position | SO-100 follower arm rotated position | SO-100 follower arm rest position | +| 1. Middle position | 2. Zero position | 3. Rotated position | 4. Rest position | +| ------------ |------------------------------------------------------------------------------------------------------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ | +| SO-101 leader arm middle position | SO-101 leader arm zero position | SO-101 leader arm rotated position | SO-101 leader arm rest position | Make sure both arms are connected and run this script to launch manual calibration: ```bash @@ -467,12 +465,12 @@ python lerobot/scripts/control_robot.py \ --control.arms='["main_follower"]' ``` -#### b. 
Manual calibration of leader arm -Follow step 6 of the [assembly video](https://youtu.be/FioA2oeFZ5I?t=724) which illustrates the manual calibration. You will need to move the leader arm to these positions sequentially: +#### Manual calibration of leader arm +You will also need to move the leader arm to these positions sequentially: -| 1. Zero position | 2. Rotated position | 3. Rest position | -| ------------------------------------------------------------------------------------------------------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ | -| SO-100 leader arm zero position | SO-100 leader arm rotated position | SO-100 leader arm rest position | +| 1. Middle position | 2. Zero position | 3. Rotated position | 4. Rest position | +| ------------ |------------------------------------------------------------------------------------------------------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ | +| SO-100 leader arm middle position | SO-100 leader arm zero position | SO-100 leader arm rotated position | SO-100 leader arm rest position | Run this script to launch manual calibration: ```bash diff --git a/examples/12_use_so101.md b/examples/12_use_so101.md new file mode 100644 index 0000000000..161712c3b9 --- /dev/null +++ b/examples/12_use_so101.md @@ -0,0 +1,697 @@ +# Assemble and use SO-101 + +In the steps below we explain how to assemble and use our flagship robot, the SO-101 with LeRobot 🤗. + +## Source the parts + +Follow this [README](https://github.com/TheRobotStudio/SO-ARM100). It contains the bill of materials, with a link to source the parts, as well as the instructions to 3D print the parts, +and advice if it's your first time printing or if you don't own a 3D printer. + +Before assembling, you will first need to configure your motors. To this end, we provide a nice script, so let's first install LeRobot. After configuration, we will also guide you through assembly. + +## Install LeRobot + +> [!TIP] +> We use the Command Prompt (cmd) quite a lot. If you are not comfortable using the cmd or want to brush up using the command line you can have a look here: [Command line crash course](https://developer.mozilla.org/en-US/docs/Learn_web_development/Getting_started/Environment_setup/Command_line) + +Download our source code: +```bash +git clone https://github.com/huggingface/lerobot.git +cd lerobot +``` + +Create a virtual environment with Python 3.10 and activate it, e.g. 
with [`miniconda`](https://docs.anaconda.com/miniconda/install/#quick-command-line-install): +```bash +conda create -y -n lerobot python=3.10 +``` +Now restart the shell by running: + +##### Windows: +```bash +`source ~/.bashrc` +``` + +##### Mac: +```bash +`source ~/.bash_profile` +``` + +##### zshell: +```bash +`source ~/.zshrc` +``` + +Then activate your conda environment, you have to do this each time you open a shell to use lerobot: +```bash +conda activate lerobot +``` + +When using `miniconda`, install `ffmpeg` in your environment: +```bash +conda install ffmpeg -c conda-forge +``` + +> [!NOTE] +> This usually installs `ffmpeg 7.X` for your platform compiled with the `libsvtav1` encoder. If `libsvtav1` is not supported (check supported encoders with `ffmpeg -encoders`), you can: +> - _[On any platform]_ Explicitly install `ffmpeg 7.X` using: +> ```bash +> conda install ffmpeg=7.1.1 -c conda-forge +> ``` +> - _[On Linux only]_ Install [ffmpeg build dependencies](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu#GettheDependencies) and [compile ffmpeg from source with libsvtav1](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu#libsvtav1), and make sure you use the corresponding ffmpeg binary to your install with `which ffmpeg`. + +Install 🤗 LeRobot: +```bash +cd lerobot && pip install ".[feetech]" +``` + +> [!NOTE] +> If you encounter build errors, you may need to install additional dependencies (`cmake`, `build-essential`, and `ffmpeg libs`). On Linux, run: `sudo apt-get install cmake build-essential python3-dev pkg-config libavformat-dev libavcodec-dev libavdevice-dev libavutil-dev libswscale-dev libswresample-dev libavfilter-dev pkg-config`. For other systems, see: [Compiling PyAV](https://pyav.org/docs/develop/overview/installation.html#bring-your-own-ffmpeg) + + +## Configure motors + +To configure the motors designate one bus servo adapter and 6 motors for your leader arm, and similarly the other bus servo adapter and 6 motors for the follower arm. It's convenient to label them and write on each motor if it's for the follower `F` or for the leader `L` and it's ID from 1 to 6. + +You now should plug the 5V or 12V power supply to the motor bus. 5V for the STS3215 7.4V motors and 12V for the STS3215 12V motors. Note that the leader arm always uses the 7.4V motors, so watch out that you plug in the right power supply if you have 12V and 7.4V motors, otherwise you might burn your motors! Now, connect the motor bus to your computer via USB. Note that the USB doesn't provide any power, and both the power supply and USB have to be plugged in. + +### Find the USB ports associated to each arm + +To find the port for each bus servo adapter, run this script: +```bash +python lerobot/scripts/find_motors_bus_port.py +``` +#### Example outputs of script + +##### Mac: +Example output leader arm's port: `/dev/tty.usbmodem575E0031751` + +```bash +Finding all available ports for the MotorBus. +['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751'] +Remove the usb cable from your MotorsBus and press Enter when done. + +[...Disconnect leader arm and press Enter...] + +The port of this MotorsBus is /dev/tty.usbmodem575E0031751 +Reconnect the usb cable. +``` + +Example output follower arm port: `/dev/tty.usbmodem575E0032081` + +``` +Finding all available ports for the MotorBus. +['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751'] +Remove the usb cable from your MotorsBus and press Enter when done. + +[...Disconnect follower arm and press Enter...] 
+ +The port of this MotorsBus is /dev/tty.usbmodem575E0032081 +Reconnect the usb cable. +``` + +##### Linux: +On Linux, you might need to give access to the USB ports by running: +```bash +sudo chmod 666 /dev/ttyACM0 +sudo chmod 666 /dev/ttyACM1 +``` + +Example output leader arm port: `/dev/ttyACM0` + +```bash +Finding all available ports for the MotorBus. +['/dev/ttyACM0', '/dev/ttyACM1'] +Remove the usb cable from your MotorsBus and press Enter when done. + +[...Disconnect leader arm and press Enter...] + +The port of this MotorsBus is /dev/ttyACM0 +Reconnect the usb cable. +``` + +Example output follower arm port: `/dev/ttyACM1` + +``` +Finding all available ports for the MotorBus. +['/dev/ttyACM0', '/dev/ttyACM1'] +Remove the usb cable from your MotorsBus and press Enter when done. + +[...Disconnect follower arm and press Enter...] + +The port of this MotorsBus is /dev/ttyACM1 +Reconnect the usb cable. +``` + +#### Update config file + +Now that you have your ports, update the **port** default values of [`SO101RobotConfig`](https://github.com/huggingface/lerobot/blob/main/lerobot/common/robot_devices/robots/configs.py). +You will find something a class called `so101` where you can update the `port` values with your actual motor ports: +```python +@RobotConfig.register_subclass("so101") +@dataclass +class So101RobotConfig(ManipulatorRobotConfig): + calibration_dir: str = ".cache/calibration/so101" + # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes. + # Set this to a positive scalar to have the same value for all motors, or a list that is the same length as + # the number of motors in your follower arms. + max_relative_target: int | None = None + + leader_arms: dict[str, MotorsBusConfig] = field( + default_factory=lambda: { + "main": FeetechMotorsBusConfig( + port="/dev/tty.usbmodem58760431091", <-- UPDATE HERE + motors={ + # name: (index, model) + "shoulder_pan": [1, "sts3215"], + "shoulder_lift": [2, "sts3215"], + "elbow_flex": [3, "sts3215"], + "wrist_flex": [4, "sts3215"], + "wrist_roll": [5, "sts3215"], + "gripper": [6, "sts3215"], + }, + ), + } + ) + + follower_arms: dict[str, MotorsBusConfig] = field( + default_factory=lambda: { + "main": FeetechMotorsBusConfig( + port="/dev/tty.usbmodem585A0076891", <-- UPDATE HERE + motors={ + # name: (index, model) + "shoulder_pan": [1, "sts3215"], + "shoulder_lift": [2, "sts3215"], + "elbow_flex": [3, "sts3215"], + "wrist_flex": [4, "sts3215"], + "wrist_roll": [5, "sts3215"], + "gripper": [6, "sts3215"], + }, + ), + } + ) +``` + +Here is a video of the process: + + + +### Set motor IDs + +Now we need to set the motor ID for each motor. Plug your motor in only one of the two ports of the motor bus and run this script to set its ID to 1. Replace the text after --port to the corresponding control board port. +```bash +python lerobot/scripts/configure_motor.py \ + --port /dev/tty.usbmodem58760432961 \ + --brand feetech \ + --model sts3215 \ + --baudrate 1000000 \ + --ID 1 +``` + +Then unplug your motor and plug the second motor and set its ID to 2. +```bash +python lerobot/scripts/configure_motor.py \ + --port /dev/tty.usbmodem58760432961 \ + --brand feetech \ + --model sts3215 \ + --baudrate 1000000 \ + --ID 2 +``` + +Redo this process for all your motors until ID 6. Do the same for the 6 motors of the leader arm, but make sure to change the power supply if you use motors with different voltage. 
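Since the configuration script has to be run once per motor, a small shell loop can save some retyping — a sketch, not part of the repo, assuming one adapter and the same port for every motor of a given arm:

```bash
# Hypothetical convenience loop around configure_motor.py (swap in your own port).
# Only one motor may be plugged into the bus at a time, hence the pause between IDs.
PORT=/dev/tty.usbmodem58760432961
for ID in 1 2 3 4 5 6; do
  read -p "Plug in ONLY the motor that should become ID ${ID}, then press Enter..."
  python lerobot/scripts/configure_motor.py \
    --port ${PORT} \
    --brand feetech \
    --model sts3215 \
    --baudrate 1000000 \
    --ID ${ID}
done
```

As you go, label each motor with its arm (`L` or `F`) and its ID, as suggested earlier.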
+ +Here is a video of the process: + + + +## Step-by-Step Assembly Instructions + +### Clean Parts +Remove all support material from the 3D-printed parts. + +### Joint 1 + +- Place the first motor into the base. +- Fasten the motor with 4 M2x6mm screws (smallest screws). Two from the top and two from bottom. +- Slide over the first motor holder and fasten it using two M2x6mm screws (one on each side). +- Install both motor horns, securing the top horn with a M3x6mm screw. +- Attach the shoulder part. +- Tighten the shoulder part with 4 M3x6mm screws on top and 4 M3x6mm screws on the bottom +- Add the shoulder motor holder. + + + +### Joint 2 + +- Slide the second motor in from the top. +- Fasten the second motor with 4 M2x6mm screws. +- Attach both motor horns to motor 2, again use the M3x6mm horn screw. +- Attach the upper arm with 4 M3x6mm screws on each side. + + + +### Joint 3 + +- Insert motor 3 and fasten using 4 M2x6mm screws +- Attach both motor horns to motor 3 and secure one again with a M3x6mm horn screw. +- Connect the forearm to motor 3 using 4 M3x6mm screws on each side. + + + +### Joint 4 + +- Slide over motor holder 4. +- Slide in motor 4. +- Fasten motor 4 with 4 M2x6mm screws and attach its motor horns, use a M3x6mm horn screw. + + + +### Joint 5 + +- Insert motor 5 into the wrist holder and secure it with 2 M2x6mm front screws. +- Install only one motor horn on the wrist motor and secure it with a M3x6mm horn screw. +- Secure the wrist to motor 4 using 4 M3x6mm screws on both sides. + + + +### Gripper / Handle + +#### Follower: + +- Attach the gripper to motor 5, attach it to the motor horn on the wrist using 4 M3x6mm screws. +- Insert the gripper motor and secure it with 2 M2x6mm screws on each side. +- Attach the motor horns and again use a M3x6mm horn screw. +- Install the gripper claw and secure it with 4 M3x6mm screws on both sides. + + + +#### Leader: + +- Mount the leader holder onto the wrist and secure it with 4 M3x6mm screws. +- Attach the handle to motor 5 using 1 M2x6mm screw. +- Insert the gripper motor, secure it with 2 M2x6mm screws on each side, attach a motor horn using a M3x6mm horn screw. +- Attach the follower trigger with 4 M3x6mm screws. + + + +##### Wiring + +- Attach the motor controller on the back. +- Then insert all wires, use the wire guides everywhere to make sure the wires don't unplug themself and stay in place. + + + +## Calibrate + +Next, you'll need to calibrate your SO-101 robot to ensure that the leader and follower arms have the same position values when they are in the same physical position. +The calibration process is very important because it allows a neural network trained on one SO-101 robot to work on another. + +#### Manual calibration of follower arm + +You will need to move the follower arm to these positions sequentially, note that the rotated position is on the right side of the robot and you have to open the gripper fully. + +| 1. Middle position | 2. Zero position | 3. Rotated position | 4. 
Rest position | +| ------------ |------------------------------------------------------------------------------------------------------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ | +| SO-101 leader arm middle position | SO-101 leader arm zero position | SO-101 leader arm rotated position | SO-101 leader arm rest position | + +Make sure both arms are connected and run this script to launch manual calibration: +```bash +python lerobot/scripts/control_robot.py \ + --robot.type=so101 \ + --robot.cameras='{}' \ + --control.type=calibrate \ + --control.arms='["main_follower"]' +``` + +#### Manual calibration of leader arm +You will also need to move the leader arm to these positions sequentially: + +| 1. Middle position | 2. Zero position | 3. Rotated position | 4. Rest position | +| ------------ |------------------------------------------------------------------------------------------------------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ | +| SO-101 leader arm middle position | SO-101 leader arm zero position | SO-101 leader arm rotated position | SO-101 leader arm rest position | + +Run this script to launch manual calibration: +```bash +python lerobot/scripts/control_robot.py \ + --robot.type=so101 \ + --robot.cameras='{}' \ + --control.type=calibrate \ + --control.arms='["main_leader"]' +``` +## Control your robot + +Congrats 🎉, your robot is all set to learn a task on its own. Next we will explain you how to train a neural network to autonomously control a real robot. + +**You'll learn to:** +1. How to record and visualize your dataset. +2. How to train a policy using your data and prepare it for evaluation. +3. How to evaluate your policy and visualize the results. + +By following these steps, you'll be able to replicate tasks like picking up a Lego block and placing it in a bin with a high success rate, as demonstrated in [this video](https://x.com/RemiCadene/status/1814680760592572934). + +This tutorial is specifically made for the affordable [SO-101](https://github.com/TheRobotStudio/SO-ARM100) robot, but it contains additional information to be easily adapted to various types of robots like [Aloha bimanual robot](https://aloha-2.github.io) by changing some configurations. The SO-101 consists of a leader arm and a follower arm, each with 6 motors. It can work with one or several cameras to record the scene, which serve as visual sensors for the robot. + +During the data collection phase, you will control the follower arm by moving the leader arm. This process is known as "teleoperation." This technique is used to collect robot trajectories. Afterward, you'll train a neural network to imitate these trajectories and deploy the network to enable your robot to operate autonomously. 
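One practical note before moving on: calibration only needs to be done once per arm, because the results are written to the `calibration_dir` set in the robot config (`.cache/calibration/so101` by default). A quick way to check that both arms have a saved calibration is to list that folder — a sketch, where the exact file names are an assumption based on the default `main` arm naming:

```bash
# Calibration files written by the calibrate step above
# (path comes from So101RobotConfig.calibration_dir; adjust if you changed it).
ls .cache/calibration/so101/
# Expected: one JSON file per calibrated arm, e.g. main_follower.json and main_leader.json
```

If a file is missing, the teleoperation and recording commands below will detect it and launch the calibration procedure again for that arm.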
+ +If you encounter any issues at any step of the tutorial, feel free to seek help on [Discord](https://discord.com/invite/s3KuuzsPFb) or don't hesitate to iterate with us on the tutorial by creating issues or pull requests. + +## Teleoperate + +Run this simple script to teleoperate your robot (it won't connect and display the cameras): +```bash +python lerobot/scripts/control_robot.py \ + --robot.type=so101 \ + --robot.cameras='{}' \ + --control.type=teleoperate +``` + +The teleoperate command will automatically: +1. Identify any missing calibrations and initiate the calibration procedure. +2. Connect the robot and start teleoperation. + +## Setup Cameras + +To connect a camera you have three options: +1. OpenCVCamera which allows us to use any camera: usb, realsense, laptop webcam +2. iPhone camera with MacOS +3. Phone camera on Linux + +### Use OpenCVCamera + +The [`OpenCVCamera`](../lerobot/common/robot_devices/cameras/opencv.py) class allows you to efficiently record frames from most cameras using the [`opencv2`](https://docs.opencv.org) library. For more details on compatibility, see [Video I/O with OpenCV Overview](https://docs.opencv.org/4.x/d0/da7/videoio_overview.html). + +To instantiate an [`OpenCVCamera`](../lerobot/common/robot_devices/cameras/opencv.py), you need a camera index (e.g. `OpenCVCamera(camera_index=0)`). When you only have one camera like a webcam of a laptop, the camera index is usually `0` but it might differ, and the camera index might change if you reboot your computer or re-plug your camera. This behavior depends on your operating system. + +To find the camera indices, run the following utility script, which will save a few frames from each detected camera: +```bash +python lerobot/common/robot_devices/cameras/opencv.py \ + --images-dir outputs/images_from_opencv_cameras +``` + +The output will look something like this if you have two cameras connected: +``` +Mac or Windows detected. Finding available camera indices through scanning all indices from 0 to 60 +[...] +Camera found at index 0 +Camera found at index 1 +[...] +Connecting cameras +OpenCVCamera(0, fps=30.0, width=1920.0, height=1080.0, color_mode=rgb) +OpenCVCamera(1, fps=24.0, width=1920.0, height=1080.0, color_mode=rgb) +Saving images to outputs/images_from_opencv_cameras +Frame: 0000 Latency (ms): 39.52 +[...] +Frame: 0046 Latency (ms): 40.07 +Images have been saved to outputs/images_from_opencv_cameras +``` + +Check the saved images in `outputs/images_from_opencv_cameras` to identify which camera index corresponds to which physical camera (e.g. `0` for `camera_00` or `1` for `camera_01`): +``` +camera_00_frame_000000.png +[...] +camera_00_frame_000047.png +camera_01_frame_000000.png +[...] +camera_01_frame_000047.png +``` + +Note: Some cameras may take a few seconds to warm up, and the first frame might be black or green. + +Now that you have the camera indexes, you should change then in the config. You can also change the fps, width or height of the camera. 
+ +The camera config is defined per robot, can be found here [`RobotConfig`](https://github.com/huggingface/lerobot/blob/main/lerobot/common/robot_devices/robots/configs.py) and looks like this: +```python +cameras: dict[str, CameraConfig] = field( + default_factory=lambda: { + "wrist": OpenCVCameraConfig( + camera_index=0, <-- UPDATE HERE + fps=30, + width=640, + height=480, + ), + "base": OpenCVCameraConfig( + camera_index=1, <-- UPDATE HERE + fps=30, + width=640, + height=480, + ), + } + ) +``` + +### Use your phone +#### Mac: + +To use your iPhone as a camera on macOS, enable the Continuity Camera feature: +- Ensure your Mac is running macOS 13 or later, and your iPhone is on iOS 16 or later. +- Sign in both devices with the same Apple ID. +- Connect your devices with a USB cable or turn on Wi-Fi and Bluetooth for a wireless connection. + +For more details, visit [Apple support](https://support.apple.com/en-gb/guide/mac-help/mchl77879b8a/mac). + +Your iPhone should be detected automatically when running the camera setup script in the next section. + +#### Linux: + +If you want to use your phone as a camera on Linux, follow these steps to set up a virtual camera + +1. *Install `v4l2loopback-dkms` and `v4l-utils`*. Those packages are required to create virtual camera devices (`v4l2loopback`) and verify their settings with the `v4l2-ctl` utility from `v4l-utils`. Install them using: +```python +sudo apt install v4l2loopback-dkms v4l-utils +``` +2. *Install [DroidCam](https://droidcam.app) on your phone*. This app is available for both iOS and Android. +3. *Install [OBS Studio](https://obsproject.com)*. This software will help you manage the camera feed. Install it using [Flatpak](https://flatpak.org): +```python +flatpak install flathub com.obsproject.Studio +``` +4. *Install the DroidCam OBS plugin*. This plugin integrates DroidCam with OBS Studio. Install it with: +```python +flatpak install flathub com.obsproject.Studio.Plugin.DroidCam +``` +5. *Start OBS Studio*. Launch with: +```python +flatpak run com.obsproject.Studio +``` +6. *Add your phone as a source*. Follow the instructions [here](https://droidcam.app/obs/usage). Be sure to set the resolution to `640x480`. +7. *Adjust resolution settings*. In OBS Studio, go to `File > Settings > Video`. Change the `Base(Canvas) Resolution` and the `Output(Scaled) Resolution` to `640x480` by manually typing it in. +8. *Start virtual camera*. In OBS Studio, follow the instructions [here](https://obsproject.com/kb/virtual-camera-guide). +9. *Verify the virtual camera setup*. Use `v4l2-ctl` to list the devices: +```python +v4l2-ctl --list-devices +``` +You should see an entry like: +``` +VirtualCam (platform:v4l2loopback-000): +/dev/video1 +``` +10. *Check the camera resolution*. Use `v4l2-ctl` to ensure that the virtual camera output resolution is `640x480`. Change `/dev/video1` to the port of your virtual camera from the output of `v4l2-ctl --list-devices`. +```python +v4l2-ctl -d /dev/video1 --get-fmt-video +``` +You should see an entry like: +``` +>>> Format Video Capture: +>>> Width/Height : 640/480 +>>> Pixel Format : 'YUYV' (YUYV 4:2:2) +``` + +Troubleshooting: If the resolution is not correct you will have to delete the Virtual Camera port and try again as it cannot be changed. + +If everything is set up correctly, you can proceed with the rest of the tutorial. + +### Add wrist camera +If you have an additional camera you can add a wrist camera to the SO101. 
There are already many premade wrist camera holders that you can find in the SO-101 repo: [Wrist cameras](https://github.com/TheRobotStudio/SO-ARM100#wrist-cameras) + +## Teleoperate with cameras + +We can now teleoperate again while at the same time visualizing the cameras and joint positions with `rerun`. + +```bash +python lerobot/scripts/control_robot.py \ + --robot.type=so101 \ + --control.type=teleoperate \ + --control.display_data=true +``` + +## Record a dataset + +Once you're familiar with teleoperation, you can record your first dataset with the SO-101. + +We use the Hugging Face hub features for uploading your dataset. If you haven't previously used the Hub, make sure you can log in via the CLI using a write-access token; this token can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens). + +Add your token to the CLI by running this command: +```bash +huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential +``` + +Then store your Hugging Face repository name in a variable: +```bash +HF_USER=$(huggingface-cli whoami | head -n 1) +echo $HF_USER +``` + +Now you can record a dataset. To record 2 episodes and upload your dataset to the hub, execute this command: +```bash +python lerobot/scripts/control_robot.py \ + --robot.type=so101 \ + --control.type=record \ + --control.fps=30 \ + --control.single_task="Grasp a lego block and put it in the bin." \ + --control.repo_id=${HF_USER}/so101_test \ + --control.tags='["so101","tutorial"]' \ + --control.warmup_time_s=5 \ + --control.episode_time_s=30 \ + --control.reset_time_s=30 \ + --control.num_episodes=2 \ + --control.display_data=true \ + --control.push_to_hub=true +``` + +You will see a lot of lines appearing like this one: +``` +INFO 2024-08-10 15:02:58 ol_robot.py:219 dt:33.34 (30.0hz) dtRlead: 5.06 (197.5hz) dtWfoll: 0.25 (3963.7hz) dtRfoll: 6.22 (160.7hz) dtRlaptop: 32.57 (30.7hz) dtRphone: 33.84 (29.5hz) +``` +It contains: +- `2024-08-10 15:02:58` which is the date and time of the call to the print function, +- `ol_robot.py:219` which is the end of the file name and the line number where the print function is called (`lerobot/scripts/control_robot.py` line `219`). +- `dt:33.34 (30.0hz)` which is the "delta time" or the number of milliseconds spent between the previous call to `robot.teleop_step(record_data=True)` and the current one, associated with the frequency (33.34 ms equals 30.0 Hz); note that we use `--fps 30` so we expect 30.0 Hz; when a step takes more time, the line appears in yellow. +- `dtRlead: 5.06 (197.5hz)` which is the delta time of reading the present position of the leader arm. +- `dtWfoll: 0.25 (3963.7hz)` which is the delta time of writing the goal position on the follower arm; writing is asynchronous so it takes less time than reading. +- `dtRfoll: 6.22 (160.7hz)` which is the delta time of reading the present position on the follower arm. +- `dtRlaptop:32.57 (30.7hz)` which is the delta time of capturing an image from the laptop camera in the thread running asynchronously. +- `dtRphone:33.84 (29.5hz)` which is the delta time of capturing an image from the phone camera in the thread running asynchronously. + +#### Dataset upload +Locally your dataset is stored in this folder: `~/.cache/huggingface/lerobot/{repo-id}` (e.g. `data/cadene/so101_test`). At the end of data recording, your dataset will be uploaded to your Hugging Face page (e.g. 
https://huggingface.co/datasets/cadene/so101_test) that you can obtain by running: +```bash +echo https://huggingface.co/datasets/${HF_USER}/so101_test +``` +Your dataset will be automatically tagged with `LeRobot` for the community to find it easily, and you can also add custom tags (in this case `tutorial` for example). + +You can look for other LeRobot datasets on the hub by searching for `LeRobot` [tags](https://huggingface.co/datasets?other=LeRobot). + +#### Record function + +The `record` function provides a suite of tools for capturing and managing data during robot operation: +1. Set the flow of data recording using command line arguments: + - `--control.warmup_time_s=10` defines the number of seconds before starting data collection. It allows the robot devices to warmup and synchronize (10 seconds by default). + - `--control.episode_time_s=60` defines the number of seconds for data recording for each episode (60 seconds by default). + - `--control.reset_time_s=60` defines the number of seconds for resetting the environment after each episode (60 seconds by default). + - `--control.num_episodes=50` defines the number of episodes to record (50 by default). +2. Control the flow during data recording using keyboard keys: + - Press right arrow `->` at any time during episode recording to early stop and go to resetting. Same during resetting, to early stop and to go to the next episode recording. + - Press left arrow `<-` at any time during episode recording or resetting to early stop, cancel the current episode, and re-record it. + - Press escape `ESC` at any time during episode recording to end the session early and go straight to video encoding and dataset uploading. +3. Checkpoints are done set during recording, so if any issue occurs, you can resume recording by re-running the same command again with `--control.resume=true`. You will need to manually delete the dataset directory if you want to start recording from scratch. + +#### Tips for gathering data + +Once you're comfortable with data recording, you can create a larger dataset for training. A good starting task is grasping an object at different locations and placing it in a bin. We suggest recording at least 50 episodes, with 10 episodes per location. Keep the cameras fixed and maintain consistent grasping behavior throughout the recordings. Also make sure the object you are manipulating is visible on the camera's. A good rule of thumb is you should be able to do the task yourself by only looking at the camera images. + +In the following sections, you’ll train your neural network. After achieving reliable grasping performance, you can start introducing more variations during data collection, such as additional grasp locations, different grasping techniques, and altering camera positions. + +Avoid adding too much variation too quickly, as it may hinder your results. + +#### Troubleshooting: +- On Linux, if the left and right arrow keys and escape key don't have any effect during data recording, make sure you've set the `$DISPLAY` environment variable. See [pynput limitations](https://pynput.readthedocs.io/en/latest/limitations.html#linux). 
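As mentioned under the record function above, an interrupted session can be resumed by re-running the exact same record command with the resume flag added — shown here as a sketch with the same parameters used earlier:

```bash
# Resume recording the same dataset after an interruption (same repo_id and settings as before,
# plus --control.resume=true); delete the dataset folder instead if you want to start over.
python lerobot/scripts/control_robot.py \
  --robot.type=so101 \
  --control.type=record \
  --control.fps=30 \
  --control.single_task="Grasp a lego block and put it in the bin." \
  --control.repo_id=${HF_USER}/so101_test \
  --control.tags='["so101","tutorial"]' \
  --control.warmup_time_s=5 \
  --control.episode_time_s=30 \
  --control.reset_time_s=30 \
  --control.num_episodes=2 \
  --control.display_data=true \
  --control.push_to_hub=true \
  --control.resume=true
```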
+ +## Visualize a dataset + +If you uploaded your dataset to the hub with `--control.push_to_hub=true`, you can [visualize your dataset online](https://huggingface.co/spaces/lerobot/visualize_dataset) by copy-pasting the repo id given by: +```bash +echo ${HF_USER}/so101_test +``` + +If you didn't upload the dataset (i.e. you recorded with `--control.push_to_hub=false`), you can visualize it locally with the visualization tool, which serves a page in the browser at `http://127.0.0.1:9090`: +```bash +python lerobot/scripts/visualize_dataset_html.py \ + --repo-id ${HF_USER}/so101_test \ + --local-files-only 1 +``` + +This will launch a local web server that looks like this: +
+ Dataset visualization in the browser +
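Besides the browser tool, episodes can also be inspected in the native `rerun` viewer (the same viewer used for `--control.display_data=true`) — a sketch, assuming the `visualize_dataset.py` script shipped in this repo and its `--repo-id`/`--episode-index` arguments:

```bash
# Stream one recorded episode into the rerun viewer instead of the browser UI.
python lerobot/scripts/visualize_dataset.py \
  --repo-id ${HF_USER}/so101_test \
  --episode-index 0
```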
+ +## Replay an episode + +A useful feature is the `replay` function, which allows to replay on your robot any episode that you've recorded or episodes from any dataset out there. This function helps you test the repeatability of your robot's actions and assess transferability across robots of the same model. + +You can replay the first episode on your robot with: +```bash +python lerobot/scripts/control_robot.py \ + --robot.type=so101 \ + --control.type=replay \ + --control.fps=30 \ + --control.repo_id=${HF_USER}/so101_test \ + --control.episode=0 +``` + +Your robot should replicate movements similar to those you recorded. For example, check out [this video](https://x.com/RemiCadene/status/1793654950905680090) where we use `replay` on a Aloha robot from [Trossen Robotics](https://www.trossenrobotics.com). + +## Train a policy + +To train a policy to control your robot, use the [`python lerobot/scripts/train.py`](../lerobot/scripts/train.py) script. A few arguments are required. Here is an example command: +```bash +python lerobot/scripts/train.py \ + --dataset.repo_id=${HF_USER}/so101_test \ + --policy.type=act \ + --output_dir=outputs/train/act_so101_test \ + --job_name=act_so101_test \ + --policy.device=cuda \ + --wandb.enable=true +``` + +Let's explain the command: +1. We provided the dataset as argument with `--dataset.repo_id=${HF_USER}/so101_test`. +2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](../lerobot/common/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor states, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset. +4. We provided `policy.device=cuda` since we are training on a Nvidia GPU, but you could use `policy.device=mps` to train on Apple silicon. +5. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`. + +Training should take several hours. You will find checkpoints in `outputs/train/act_so101_test/checkpoints`. + +To resume training from a checkpoint, below is an example command to resume from `last` checkpoint of the `act_so101_test` policy: +```bash +python lerobot/scripts/train.py \ + --config_path=outputs/train/act_so101_test/checkpoints/last/pretrained_model/train_config.json \ + --resume=true +``` + +#### Upload policy checkpoints + +Once training is done, upload the latest checkpoint with: +```bash +huggingface-cli upload ${HF_USER}/act_so101_test \ + outputs/train/act_so101_test/checkpoints/last/pretrained_model +``` + +You can also upload intermediate checkpoints with: +```bash +CKPT=010000 +huggingface-cli upload ${HF_USER}/act_so101_test${CKPT} \ + outputs/train/act_so101_test/checkpoints/${CKPT}/pretrained_model +``` + +## Evaluate your policy + +You can use the `record` function from [`lerobot/scripts/control_robot.py`](../lerobot/scripts/control_robot.py) but with a policy checkpoint as input. For instance, run this command to record 10 evaluation episodes: +```bash +python lerobot/scripts/control_robot.py \ + --robot.type=so101 \ + --control.type=record \ + --control.fps=30 \ + --control.single_task="Grasp a lego block and put it in the bin." 
\ + --control.repo_id=${HF_USER}/eval_act_so101_test \ + --control.tags='["tutorial"]' \ + --control.warmup_time_s=5 \ + --control.episode_time_s=30 \ + --control.reset_time_s=30 \ + --control.num_episodes=10 \ + --control.push_to_hub=true \ + --control.policy.path=outputs/train/act_so101_test/checkpoints/last/pretrained_model +``` + +As you can see, it's almost the same command as previously used to record your training dataset. Two things changed: +1. There is an additional `--control.policy.path` argument which indicates the path to your policy checkpoint with (e.g. `outputs/train/eval_act_so101_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `${HF_USER}/act_so101_test`). +2. The name of dataset begins by `eval` to reflect that you are running inference (e.g. `${HF_USER}/eval_act_so101_test`). diff --git a/lerobot/__init__.py b/lerobot/__init__.py index d61e4853e6..f8acafce37 100644 --- a/lerobot/__init__.py +++ b/lerobot/__init__.py @@ -181,6 +181,7 @@ "koch_bimanual", "aloha", "so100", + "so101", "moss", ] diff --git a/lerobot/common/robot_devices/robots/configs.py b/lerobot/common/robot_devices/robots/configs.py index e940b442fc..844d691158 100644 --- a/lerobot/common/robot_devices/robots/configs.py +++ b/lerobot/common/robot_devices/robots/configs.py @@ -431,6 +431,69 @@ class MossRobotConfig(ManipulatorRobotConfig): mock: bool = False +@RobotConfig.register_subclass("so101") +@dataclass +class So101RobotConfig(ManipulatorRobotConfig): + calibration_dir: str = ".cache/calibration/so101" + # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes. + # Set this to a positive scalar to have the same value for all motors, or a list that is the same length as + # the number of motors in your follower arms. 
+ max_relative_target: int | None = None + + leader_arms: dict[str, MotorsBusConfig] = field( + default_factory=lambda: { + "main": FeetechMotorsBusConfig( + port="/dev/tty.usbmodem58760431091", + motors={ + # name: (index, model) + "shoulder_pan": [1, "sts3215"], + "shoulder_lift": [2, "sts3215"], + "elbow_flex": [3, "sts3215"], + "wrist_flex": [4, "sts3215"], + "wrist_roll": [5, "sts3215"], + "gripper": [6, "sts3215"], + }, + ), + } + ) + + follower_arms: dict[str, MotorsBusConfig] = field( + default_factory=lambda: { + "main": FeetechMotorsBusConfig( + port="/dev/tty.usbmodem585A0076891", + motors={ + # name: (index, model) + "shoulder_pan": [1, "sts3215"], + "shoulder_lift": [2, "sts3215"], + "elbow_flex": [3, "sts3215"], + "wrist_flex": [4, "sts3215"], + "wrist_roll": [5, "sts3215"], + "gripper": [6, "sts3215"], + }, + ), + } + ) + + cameras: dict[str, CameraConfig] = field( + default_factory=lambda: { + "laptop": OpenCVCameraConfig( + camera_index=0, + fps=30, + width=640, + height=480, + ), + "phone": OpenCVCameraConfig( + camera_index=1, + fps=30, + width=640, + height=480, + ), + } + ) + + mock: bool = False + + @RobotConfig.register_subclass("so100") @dataclass class So100RobotConfig(ManipulatorRobotConfig): diff --git a/lerobot/common/robot_devices/robots/feetech_calibration.py b/lerobot/common/robot_devices/robots/feetech_calibration.py index 2c1e7180e8..343a6a282e 100644 --- a/lerobot/common/robot_devices/robots/feetech_calibration.py +++ b/lerobot/common/robot_devices/robots/feetech_calibration.py @@ -36,6 +36,12 @@ ROTATED_POSITION_DEGREE = 90 +def reset_middle_positions(arm: MotorsBus): + input("Please move the robot to the new middle position for calibration, then press Enter...") + # Write 128 to Torque_Enable for all motors. + arm.write("Torque_Enable", 128) + + def assert_drive_mode(drive_mode): # `drive_mode` is in [0,1] with 0 means original rotation direction for the motor, and 1 means inverted. 
if not np.all(np.isin(drive_mode, [0, 1])): @@ -439,6 +445,8 @@ def run_arm_manual_calibration(arm: MotorsBus, robot_type: str, arm_name: str, a print(f"\nRunning calibration of {robot_type} {arm_name} {arm_type}...") + reset_middle_positions(arm) + print("\nMove arm to zero position") print("See: " + URL_TEMPLATE.format(robot=robot_type, arm=arm_type, position="zero")) input("Press Enter to continue...") diff --git a/lerobot/common/robot_devices/robots/manipulator.py b/lerobot/common/robot_devices/robots/manipulator.py index 9173abc628..ebf7c3994d 100644 --- a/lerobot/common/robot_devices/robots/manipulator.py +++ b/lerobot/common/robot_devices/robots/manipulator.py @@ -243,7 +243,7 @@ def connect(self): if self.robot_type in ["koch", "koch_bimanual", "aloha"]: from lerobot.common.robot_devices.motors.dynamixel import TorqueMode - elif self.robot_type in ["so100", "moss", "lekiwi"]: + elif self.robot_type in ["so100", "so101", "moss", "lekiwi"]: from lerobot.common.robot_devices.motors.feetech import TorqueMode # We assume that at connection time, arms are in a rest position, and torque can @@ -260,7 +260,7 @@ def connect(self): self.set_koch_robot_preset() elif self.robot_type == "aloha": self.set_aloha_robot_preset() - elif self.robot_type in ["so100", "moss", "lekiwi"]: + elif self.robot_type in ["so100", "so101", "moss", "lekiwi"]: self.set_so100_robot_preset() # Enable torque on all motors of the follower arms @@ -313,7 +313,7 @@ def load_or_run_calibration_(name, arm, arm_type): calibration = run_arm_calibration(arm, self.robot_type, name, arm_type) - elif self.robot_type in ["so100", "moss", "lekiwi"]: + elif self.robot_type in ["so100", "so101", "moss", "lekiwi"]: from lerobot.common.robot_devices.robots.feetech_calibration import ( run_arm_manual_calibration, ) diff --git a/lerobot/common/robot_devices/robots/utils.py b/lerobot/common/robot_devices/robots/utils.py index dab514d5ec..768d49dbc2 100644 --- a/lerobot/common/robot_devices/robots/utils.py +++ b/lerobot/common/robot_devices/robots/utils.py @@ -23,6 +23,7 @@ MossRobotConfig, RobotConfig, So100RobotConfig, + So101RobotConfig, StretchRobotConfig, ) @@ -58,6 +59,8 @@ def make_robot_config(robot_type: str, **kwargs) -> RobotConfig: return MossRobotConfig(**kwargs) elif robot_type == "so100": return So100RobotConfig(**kwargs) + elif robot_type == "so101": + return So101RobotConfig(**kwargs) elif robot_type == "stretch": return StretchRobotConfig(**kwargs) elif robot_type == "lekiwi": diff --git a/media/so101/follower_middle.webp b/media/so101/follower_middle.webp new file mode 100644 index 0000000000000000000000000000000000000000..7f22d2563dfb4e247145740fed2aece3077941c0 GIT binary patch literal 65416 zcmV(!K;^$uNk&G1{{R42MM6+kP&goT{{R3G<_4VsDgXum2tI8#mr0}|r7EDY__>L)S?kJ$Z^N%V){GKb3Cd`y8?MzsdiN`R1y>{_*ei zpZ>GE_V#r{=n4A0GJ0wMtreox>R-nGkbG}ZU+eJ2|98zh zoj>n-zyHDOhxLcB|LfmUe>eZQ9;*NHzi>MXKa~Hi>&56!|IO)R`!kr!^Pm3Sj{b}O zKZF0AeZTwP@xQ!(E4BONclD3WzM1y*{%47QHMJ4^yZWE|Z&0}F_5bnzZas)T3j0s} zkGp@EkJ|KO`Oo*>z}|Iuw@^>YGyUJ(PbLrdzx{n-!ce~4&$luC+fB^-U@_dmiD*ud zFEhf1(NFs3@rvUJ!xiC5Jmtmae=g{j>#=B9S99#V&$lz}$>zlCen~-eI2U49t8iJ{ z_)@{HqqkFL;d8i4b=a3BFAt1akY(&hDYX2A7dJXkh;}(R1gWC@Td)w+LJX9ekr-{{ zQG|YaNe8)!mr}`@TNLBElp+Rnk`y-xDm?S^ArUM%XTN$eK5eI z3*V01$q^6ldu+bW;);Zj+=USwxkBcBY8jJrk@D+Y5RoGeyY%?6;8%^7+cb)Yw525N zr-dj^w6UA`?I$6$PH3}8_V#;EnDrN$3z;7vj-RwAVhc0nJy}=8v#|mcd`NdjAfzKa zV`!CHhUawqNJXO+)3h{C&6)n1o}1%?3`nvTrWmCp!^{`Ay}BKpX0?;jZ(x^f;53XB zS~>pOCkg4W^OqNzJ`#%h`a$tuf**dD(yFPbd-S5YQ?a-vPP|U=r|lkl--vLK?f<;m 
[GIT binary patch literal data for the added .webp image files omitted]
zENE@|a#m)Q%cy!x85X!-Wo1@#Wypn>oUd+u-yn-q>4^Go8lz;$jjSNVvm7MsO z!pDK{@bGR3UH&$J35vN=H4busJ^*%^5A-{1DpelJ% z-Se+DC@iIjFKpq=pGp#{U8D<{a3e-PG_N(kQIMwRabRd+et9wOb$-sxC@`7|qtxm{ z9+l2NQQ$Wn2H5O}UV>ba$n^(aM#Ofb#yyRd+Ud5i=MdoDADq18D^rUa5U85nmClDJ zbu2?X#=FB{#NVm2Fl5Q!nV|QVCR$st1c#{fo>jK;U0v(;c5+!&g@+Xt=w=~CsYB?O zlxY*jiRdxumH=2n=Fgc^N{P8wb#_}Wtw3qeo`9+Z&xvEj35R9&Xg8+1&mAPq=Aj$} zu^`F85@cmUK2$l%1r$stFM^pYkb2;;36)4~`YfvpTTfjA>+DJ8Y`r~K5-$cFQlQB= zkp7BBfZ<4f^qxuyRMi1qZitsIBD9piy;XEMq*&MgAfkyXsRZ9&w(d#FbF>j9+1__l zuJRp>^;-jhBHN{Um%9`6*VtD=m=votV0#Tzbv_MX3ARGJBRPkjWaTD0E(5QPyZMF6+sp4W`T& z`UO+PaD(>zQ#3hs&t6#wM6 zErdU0p1_$hL&vky!IMd3XYmSr=eDpSmOWvLRcQHEg$oTZnh_Sd&fj?`l#!(N@(x^_+(fXYDe-fE$SSiU8v+b<&xwFUf|2^HBLNvCaj=ix40f&yr zc$<-h5kKBBPit+P$N3`FvL!Ekp^^iLqi2+fmqZCQR71X3Yn_V z@CGgmsl;Kl)&Mk4#Vx4DU55=@K^}L+#$Y_`k8KrIin(QrSy6Usrh+$M1Bls&E0imRUPoQvE#JrJa|` zlZLDd6v>SrUHxuM#qc%v+W6Y``Oky&d(*i$W;F@_bJVa`1<9I3G01DX=V$P4@Z9QU zwQaE&l><(#?PkrA6qqE~5)Cdx6`{L{a2!&=XZGU_de04e+obMuU{lh3>lw`RNEs*9V@r}s%Bh=Y&YxR`b`%=N{Qssq&D{a%VvY_U*=PQ3F^ z8Q5p(aZzMThI^^VC>(*nO%97H(Ac~Q)>M+T662^U`;CFgt@U zoU5_ccq{}7SGKf~AZ;w*U_#S81(`O*^Idm!fk0UqVgJeCf{5D@WJL6LbKR^F;OOwl z1fZ(jA-KyE?jwGm+xV;kuR1QtrAQ!6FH0suezx5VgtYz#s)G09!k1#ouq7$Z=zzP@ zD1(8@&~@B15*+^*a?R3nrijX@&fBQJN}oaxa+}n}0bL)v3i^V)B|`jDa*u}`wrqH0 zJE78-Ag%K1>01s_!QB^LKOJ?1&HF?Mm-RSNVx|3}Q8EoAXII0RwDdR$gS6XubSUhN9DGz-75z z&kUz{*N;~X{Jg!X4(a{@{S#P95q^_b5Gj92nKSGjb7C z`BO^xV*~FL94Bak)H6)PR*QD+Zl3hNet}nuZs{BOyk~KOD7y}F{mNE6>4u-HhWpY8 zm*h6pIriII<_!ty056InWl^}DX0lO|GW<^jOXEz(y*MZt8J~5 zPYl8qXONnS3=qLFwTLbZ)mb8-Isl2z;l$kwJ|v-#cBSbL131JFk-j9fu_l1OOpb9_ zGucd8c|RujtPjPVf+k$}&NdmVI}kI{33pFe{G8PwW1Crx5mB%i5L+ID`Nsxw!^inG z*Btptx>s$NS7FDB$pPV5vQDh*K7i0;|suY6ujbV zW-w>>IG|p9DT^%%11NB=t_O?!?nVLZ+cm-T<1yo!5?M{};2zlPBh364{L0U z%ct{$01NloM)Mw&DP8C+zjuTgLOa_hcJhN=ow2Mj33ssx?;Q=`@WU*K1`Ro|v~s_) zC7sXA$PH4O{SiPcoz8RvysRS_XkrL@vbA`BLz$gGi9lpNdeWo5vf7Ax#F2BybTYYp za1_Y52^V{cEd4cX*koGAjcz4SFCPwPf?&pA6GYZp&ms3h-0wJyusC1*0b(}V+6)}? 
zOiiP`ZmS7==|n-V>kG>I**l0^oQA(eQXd}1M6-W=#RnPKmPmrWmpy8y_iVOoJ zu~ZUCKu&tHhsUX@20XUNCTo8h^Q4QFk{DHittcwpdy-MlHp~3~{+!lA;l7c$GuHU9 zF>+=)!|y~wPhD-+VMooaJEOY#_LpUQvLvQmY+;`#8*w4Bi6ST?X^w;S`QhLPJai3* z0a#1S{R|GK=#E>2#_?v|FxPvh1v{8z<_}I}VH9?q=<|3*9Bd+Jyxwk=#c;Tx2q-hU zx6m;k^haEd(_)PDru;^Xi#_xq#uL5F^dqEUSmw;uPvUiR93b=5JCxfhNul5zRFQJJ z*3+^22AeU%wc32~et3xZh$!iF*6KKNPv0`699f&8`*4VlZe_$x_wU~Ur z){rABy0VR5GbZDNWeaL{3+a_k)Mrc&!)`*taI;Nlg5z4TLrwV4JJ1S zx1&0Mq$xM3spyh6Le=>hE^p-_duUI)@hF_Lq-r8-*`Tr;t@mj0dHxVd;Epf9V%fUh zn|)+r#3~k!A#rI?7)PHzmHDjrI6sdyQ!Z7U=57ej{&0xz4wNZs z(F~KMU-=0uBo624TgQwh^{{G&i>%73p$abRebOqn-`C&lbt!0R2*N-rMY@9}KY_{8 zXo1UdO>vLVe*8<=8;u8{mC*Pfuh(R>1odQzlgZkGX0TT_=r}gAJBYJM1v%Q|pkvJ4 zzcxE$dY^bj$$z$23Y7eB34}v`affH+G**rVw72WMxvLRBzztv^0I}M#Vsb${jW4YO zip;vG#64vN8*r!bU)4o40z*MKUU+A^bCvVlegd>X9d?Dm3pf>Y_u3aic4ivS%)OT%E>(=U~_@)%EXu0GQTOFXxr z~pt_AsSP0=yROEn~@-mq4Q{;QjoIo#zFT97)k*#uZxYt zg1HaLR=1mrXDUAC@Jzift3(c3mUBmf71a~KxI765H5(kAJbAO3+C zq3nW5Zr#dI9~=yD`PtAKU06TO7hTU!s!}OSyu+FF$Kd^}CSc3yQl~HLwavLDenKlFE>~| zq(TuGXSI2z^_AA?(M#x#8>kb9?n|QLl6ZLQtnzAsL`tqq2;JxPZ)W=H!t00#SpiNV zj%pKOtj7Kn)x>9YTUBFf!Y$ZhFxj))Wmj`g&*0u^ljR8R=oXs=U)*&` zuFO~HmLNplWjr(T6{E$?No1dfg{$u*4()o8PA^2T>v>Js@$^MGTI~L3$nV(d4cNUT zYeJxSEQ~l^1EP65ajn5lH*`6((U5!&!tK(6u%X3E9CmA|uvlc{cL`2XK6aAsmndi~ znyX{29P0%dWKJG8j4uXCA3Q6`3XyuZty}X|y5WN46r~6lNx}~;d*(ZyTbA&y5${YJ z#c)V@aug7I?6yqnRHQv2-Ob}Z<0pEh3;C$%BV7qVSG;)Y2MJ|nFU0-_e14^{ya!2C zwJW1kMI)&v5;V@cs$9$2 zN>k(4lLmF0#^-TZB@DtB2@)puJr30oFtO*kvIfW$5KGkkP*ijps*KQ^iQ3!_>`=A; z)HzTS&u0wW(SA;uWimoc>@auQ;Mo?!6yX9EUU9 zb}Am}UvP}0F-k{&L%6$Y+mixTA>Y~UzEj%MAi_hGr;dF}x!(b0a|ZLSpE0Plt!tNJpieUH<=Cj54CUzz4Qal{mfug zUtHQbOj~I=PKe@DrfldHO?XCQ8%r2bD1x1ZXQS3XNbaQ0su^@1{UU#lr6K-;dAnO* z8l-N}TAp8T`eDR_j*+DjSmq8U8~cU+DB-f-yE z{L^KIo(1qCfIyP6nQ>^B`7-!t(sw68NR)Ndx*!p7&)_{UQbAg~rQz(+S9vqe_w_xN z+(QLi=|61h_P!Fa(tjC2e&}R)yE}`urQtVfseKp*2#@tn$a^`u z){^jG*4B5E4&trY5B!-^5LxVc^5`|{EEdLV!~=&0XhDobjKr*1HW%oBvv9&#gWWBb zS~b*?34^=-mrfMaOAj?K5@3#zJ_)?I*@BmEj4p*#X=I56S_xZr8ID;EQg656pq>N3 zUdCzO`cFPwlJcB9K}?(e1KqY%@T0!3dd++P$nQ**i9iV%O-`vE&?X{xfVfLe&b&ME z$5T?6%vY@BT|MG_Nuxl&*aWTujxRf~d&cE>($U;A)OEYW%1EOKAc9*>1~%IY#wL_T z+BsouMgqV%d^53yd2?v_MFz28SMcHOr~?bXXgY)))e;yAjVkZL0z=|5M7IQyQz!1U zfN`JSUU6C3q=P)LCw3TIbtJTB=r3P+lZ%J+S}RooO^bEbX;Oo;e2{9(L5RBcf*R$4 zSfv__W9<+1%7+NQ%=TVl_<9?1`HVtOx9&^b`aB5sxLYlMr703`kW(EEB6ej0HUP~&ia^o-g7=Q=!wX*^KjT+lAmLfhP~Dm6gKl^ud6 z3GBFV*xejTgd<0*2k$(t9eUY(9AcSpuVlbkPi)xLI$jY+Pp$}L`^1>1cLc|n&@+El z(y`TfHpA15uDoSIwx@M>kvS8hcA-s46xYCz*=ytOpDyzm4pAXsC`=L7^4;uWtE0~; zWWW75n%P~p2pdNDc)r2098l2xfBl8ZyU8sHYg!krIc{(%Au*~ufpeW>ya8TT1(B1w za*Em|6IemB3U9O=e*X{9Yz33(i*KEI;n+%~G~miWN33w5l046yTxHFz@m;AH#rsgI zFId>w2^=QY`6#bF~JfU)~EGG;aHXPCE7b7)ssIYycg)8i@ z)N!|W@_QP*`c|=waTT> zfH>grp`GmtQa*+B;AQ<|!{IBoXNUvArsbyI2qP;-Y%Ybf>I@ZfyO4#N`0)Il0i%)c=qF4RE^nxr{ik-nMF05NB^PejEoZ} zxT@C$>{+8xB&igtS{-*A!D1_74jna**fM5FyeJ78d0$=_7_QW+;9CLQQ|YHGpQV)- zKwusqdd*)UkZ^jCKuVdBVzU-fluLDeu+fvwDeOt#Wyx8e^<2_sC{!m_@iie6iT)gH~8#5$aE0~+AJKp z=6v=mu^iouk)WcD&n%60hbA$%yz&Qa1MYmdsW&_XSr9`%t0#i5B0v2J)enhkfZD^v zHgl;``4147FT8YqN2tDX2wy!ZjVXl%rDYpK{9>a^JGdqp<7;zHgm?UVDrO4^RKuO_ zY?;ZDll3kL(MHvH(hjqc{19b&JVd(J`{AbPb&FcniPesRBz-{sl(b=GO=sjm@jop; zpoF~cqe=Q$lh9eV;=pvX<^_kP(MBjpf71@0lZ7-obLIkG^6O zb!YD=PS`n{D_z`>YaTs(DF40nv94ph_1J`3DsFRQMJ$x%CR5 z<%Eiz8SQ^B&qSy=tn9yvwV+3aZowOnovnhRK_oxu-0#+i|r zUXvJh(~}4SD3>>BQjsX;BiQV7PIa`CUB|~f`POT{gIhB(9}M=vZUH~AZaT6nmKR1q z_4KZS5YdZro*-=paCjt2=62#c0tq;ua5F-A!WU%3YBmWe@B96{ua-#0>+U z5!$=h(%*wo?EvAH4epcbpA3=Ad}*{^LZZElEN&HWW(*ecbo_$XjHkvw8N1X*qKp+(n7S#PNlKeQEk*M5+f6ntXRt z+mrlHq)|F6y&o|d7IYmQpj4{czu!V-zpz 
z%>S!P$pySs(JAi-^9G$n3H1zxeqdF{!D3+b$+&ENBp{RKRWx(*2wDDxnWz}!`6}iY zf_=g(6!#j=8$3h3J`?Q3x(?hemu73-{nL@#i;%4$>4F8lQtfQU5y_ma$}37DdS>4_ z!8Ws(PfdG_?kzFUwfdK7Qx148>15eD2U4NMSY9Rl75df%*`OoI$@r8&2dC<^kQ;SF*ONy1=;ab4*6hW- zDoeV|8uSgjtyJK8RjGUcFnQYe!DrV8=E=Co;6nrD`0)_jTmQ0F`3Lz~R^fGIO_fzp z10|H{SU5 zHC7nUSSsc+gtuycWY%$IP^N%3XWPzsN;xjY}Wucx%1t!6N&qe=;3++nh;|O;T&> zoa54O`^}{MC-Hy0&lR+(bt=e6@=wl-*1cX0Eo6A)kQ%WSI_b0lLs_hvzId&Djq-(5 zc*K5dA8cpq+y{V*-E7wZX#;CmbVh5*Hk0nIH6cPON$^7M0k&@+O+rVCC0^c(?OD61 zpb8L%u`a%+f|z4~MU~i5Yt;6L6Q3Ui9yvk8N!05W=&oBBL$yI?mEenIzEYv=w40E$q3gz~Vu8Aw?E_6{D7}`}yV|h5 zIEyUPH3ldAwjEHOoxkbvu832qyWXmVtyJz^G}R1qNl79tEF=kPy*L{@SSD;eTNrCD z%Wm+lQC>3%L+yxJVzdL)dC0u%wvbJjSM7922LL_RX+bMmcwR?_XZI8a)Lz4nj1*ON zic8H>NRfbK>BNQw)yfRgcixXw7RslgqYHpG$oEEYfxW%O4*gNj zR=a!}_4>uanj$$6`S`PWrb|836tbH;KkG*qv;`#tte5KUedQpNxHYgY+y2#uSFJrc zah_N29bg948+G%|^sz1BH?l7PS?y@=%8N;le*}*S% z4`aC`6{JYzru;_foq(bR5zW@yy3J+W*fd{Xs(maVOIa)fGZ}Q{2IT)tGob;7TXVN5 zu#@iQuEbRQUn*RSKi+n?im!st1zEV*x z<4oL3Fe|r{bFJ;Y3J7V+($62_e1!(!k3is)%s5kH4v zJxr!m?Lrk}eJv07b5fLJ3NBAWWj#1s+1S^Yaj>BUSgNrCm=uX`Pt;%@@3rLicqw0! zO3M3t^vV!Qg(gi&J6%P^grqZS`55xgg36u?R|7&PIcQ4s6*FIO8cL&x6<5nf)AK7biFS{opmC;M}<8o#@0SRwGi`7Wp8OcIPD% zxd3q-42?MnMu1o>fl2>?C%_pTPy4ivCp(vo)l|I^g&lU7pR%-PSZ{RZK!7Xb=$-~e zrO4SolI)3+b4Zzk129mM^6j|{N#O>^rRrNqc#j!^dq!=Chl)5AZc~F=pSH+>Xxs4c z!jHAlqWFW1TB(1e-vVA2sbnZ&^({b`a7Td1g+24L^AGW(8;}{2oIa5>%=J5y6COXj zowgi|w|~oL7c`qX$OsQcC{QeB8Fue_Ku8NAXGRPi{f1_E;-^b*jt`E?1O!HIV&`}f zNdAY;)Tmr=a10R1QcK@4g#S**6n~wgQ>H7rA zJ1!B=4Q^pl}Y2GA?S8`|O(;}4trW7fN zjhK1b3?Ss1L$Lv*1m6q20XHCZ@8Q?At}lOiC}L`>&uD^0ajq0*9WNd4v(w z(OG%r+LRx%QF5*tA39p*R~q2QabH!4l<-Pe`u$d`60tTM<8Ku>r7~dc#yr6Mqmj3_t8In0*ggZjn;P?Z+hR zoYxK{hzW-hU;hPi;U2=*hHKoS+Q$Q4`L4LcTrV0x6i)oCTt{FZR68izU`msVMjx-p zjmaBGwwv#@`UXlU&|U8lx;Y@fTN9G_p#dPpASn3kkOm*;Ng%%1aGb5o9VF1FJ#Q}Y zBjba1Ok$YGPQ;^|&?p z*z1#PtO$_3S2CNOz5b1X@o?zeR4?nE1Ef?!UxU@SQb_lt61W`} zT+|Xp-hRl9RkZSrgBBjZ> z5NnyF2C)Ls1*G=Yu;+6WbqCB=YtnylxRUTr1+4I@4{I^bk*2>3VRxa9YWnvp@yM#j z?5iZ0EU-$x(9xe%XUBU^6c6j`9V(&>W-=a`uD_&&X`7cIB$;$)L|~_a(AePxZBI0S z^gx!l?8y3FyX!Z@sD^VtWEHW+L}Hk>y#<(qUsn znbp%jNx3B{C=i8aPcMF75@+x}25eL~y{dbW+94v+(Y58tarRNG4i6u}$y_OJQJ=qk zv#@yo4Skvm$Z8;|4!pZEh2`CLrKSS83(RdnkM&>ntP`4T6(GfgP?C({g4K_)oq(M) zQ{kHkKJnRCM3#Z5paQCXo#8OV-Po85hYl(uA-Hc>J_D$7@?fors2CtN)2Y9VsxntL zuZiD%h21I<&NNX3560<9*I6 z0lR)HCXNNy-uo_?ANwZG#C_tYJ`S4<)tEZMX!Z2-Uc80aTsPrLg0AK&&9ysaCP)qR)tpAcD>Mdxz$MzJd(1CmTymf@kr|A@*A} zRBFfi$mpT@&P*PdRruFNF5zq#)CXEAwBu5UKM5jux}W>JgGrc*VUCqZEU#lvi>f@V zUh5Ombj)+Dx_2H1jnzX5(_F+j;){S8Px@*~aYIt348D^1(&PYXgO`Fhz9fd$|Ko^}DeUZrL!W3ajS zE`jZSHmA8n0lXfH&2eEy0(tEH$5d#21~8`A3O3pz{-*$HWN>EtS1eTYekeFzf{@D2 z$gi9-whAhMQY>?Xcl+5C4hI)&umTR(&lw9)DjvY5TZF@+_fSgK5md=sCri0AWDkMD7A2xFmZ5_Viks)!1tObAP^ z{7+%TI%c4y!DZ$x-Ez3&o_dO>_hD-1rPEBr#!NZ*v?3UB7ieEvsSl)Fr&TO1q^Fl2 zDqGgG7VfvlaK~UBWw5q3kbQswDhP&)(ze5e7&POeUkk^`JfTO|@a=Vw+w%*v;-wB) zZQSD14a987d4G*x#vm4dOh?ZYK89gAL(*49ClIi z(fMJ+Ul`!ZLN>ie-Y#P$BMr~Bqc=Ic`-V~#4^E3SeRCL?VynWo7?@=PdQL2|dHzgS z@*~$uMV1w%f@`(F@rVl)vo+JJ_#qCsSuG6~Hu02=v^B@7oK2{eZFV8PKFO@%?>{Qz zm5Jti)$*<#*+D4GuqQe`;;uydbi3Bs+?s^f}16?-#}*8z93L>g468hHi8o?jRCP&C8X!mrOo zxmU|NvIm{N)zYhPJ?79Eg4AH~lbCAf!i#%ip2)gw8)Bt-hd!Zn&S@AqyIG0;eOp)L z@Svt5pK5t%`n_}PbSq2P+OVCup3>d$=?#v;l(PLspbWs8)CAZnYwCK=tkesVs8JFG z{g6(eRyGwbtAVd&cSE8EiO(x1Irn}X)lP-ocRvvFA#}0dt%odm6zj}D@O#5ETQ*KA z3*Oq7QrHx{EL0F_r7qY38Kv>=TApCy4(o!M;nLo190LGT$;8BdXU~*HWoX0^{?*H( zl7w*}%*=aY9g2a@c1ch17o7Fq{7)Tjt zX%I~kf$=zxdKEPPs<6_x#$7z(^hoLuzq?nwWDmw@53NGWo##S(??t!e3DqwE(*Jn|(|sHC765-AoSP!`7_uAuSr%YG2U~%}=B5lU!x` zoLw9>ke&COw$n$WgFP+r|et}W82;5qfPf>oR^<46;sF_~dML7g3 
zH>G-)q&KQZvd;bn&(&{`us)Inb~J6^(BUIy6e-f}-TsR`q(d@Vmkpzt1;LNuxLQ3k zAlKq8jX5qXj8agoi{$Q0C-94XT_*M~U-CM6@@owgVU>2e*E5E*_bCLl$sI=1lyi2? z&=8MCRPS$VhZ#)cxCb@ZkX`%cdQg|{ksnU}huY0fIAw&*#n)eHcs~Vn{g`r-4#3bq zi>fz2!aINm?4ZEZH6c``jkQV!8T^xs!S+~b8Lc7yuNwB){?pN=od5w7RG$IgkaCl| zF7v5x5yjrbeN`pf%utW8dHAfVz%{-ZmDBI0rQ?!RpOO;$qWb++OvSU{OTwo5Jb?n) zl}ad3rjiYcb*kn9>*vBo=V*$SnNo^gniouaK}*FmGvpIq7I3=nw}v9*4$1y)NuqYc znO(PrDXl`%;=Gu*c$E=EW}QNsAvq)_X>2Dlyv zo(W=-Fq4P_A|4bBa-diMM|!d`u)ft(Km+a=4<=^8?KV1h@C$&E{k?YsXdBm5lSs(i z$G85qgw{skOJQQT=+js|OKw11I^wjcH!Cz7m9@YFc1h>6@apYD+uSg{ONU}Bwr*srl28W@rfTNOS6vP6+grc za?O_U_bZF{@}%#yB6@d!xu^{Li^n}4dpNd}eo>;D3)EzM5aKtW%MEa~;`~-k^opic z8gXt)#f85ffanV)hwGG{{g4i$GT=TeN_~NaR_);#D>bxEoSs zu6<$qPin*dT%ki*&dpl#a69M3wtd4`XNU@&Huwyna>(P<5S@M@i$i0_7Z{6ZTMTn_ z*@KAM_Hx@Vc${Iz08D|)!Ue=n5SV=u^76xkbV)6c!*&DQX&0RL`f()*I^0lUN(P5; zhNRZTWz|!S3jMXhOfRMSP|Kn*?`@E2WX7h349!fPrbf0CeBcygf&G64*5R`rJqFu{Yz!_T8o+i zg>wc+03hF7xx+i>7=VTC_(c!^Wb@$I8HdW-!a|t-C<%?BWz8zRcVB&{R)>mBISu)? zsi3^(AE-8{oL8x&1~XoHNlG|y@U2B{$fP^x5dapl;6b)Rb2e=&2v-E8_xU-l!2NDY zSEPQia7OA0#8f{{Y+9B%$O=LU3^y7jW-=!h0A;hzcK?&OpNH{4o5zb&hQx$08aQ#iW==5*q-4v?wpFMYQpe<+;Qf8 zbE@itH*p8%sH!pVMDI+4#pOmMKOKjgeD+dZCS3Otkl6LGGhy&iSbD%i<4@lp#3R;-THq=`abb#=BUH5T4OW%qjwe5(NM50s>({RQ(T)bY| zw>RtRNeAX4q$qEmeQ?0kbf9J&b8T=a^JTADg-$lSfnZ)WPBvQzCEyzQ76` zr-#I*B6Z>EBe?cG6W=L2B3^>3?kL9=1qpr|ffjlsEX4~!`e%H>mJ|GZd_IuO003s2 zvF7V^B8{+ps)bRQ9A`y@s?%IE9^P8*EcMjxczPxn+L#C}N4o))QCAO}RLf23-E3=U zU1D>bK6+?wt~F0X6cg_SCHpqDqbdaT6A(1h#9&_{%!=>-BChon#k_iWEj3?@7*RN|wx zqS@UoR>zsp0q?9;85TOZ%xLr>FXtHqGBBIoPZ4ha6itVV)GD%JAT=v#& zad!?G-DId=#Taq+aAuop7jk$w*92FNVx%UN5gTu?#ag4-ZMAZ$0{U}v0xz0rc+i}W z@@BG=?jZ1Dy`sE zN}PfJH(b@wVC=0|2A$2l3K-saSq+S_%G5o}iWW;#7PaH|7L8mnzA3v!4!{9>HRX(o zLu9Qo>&TkeMo21}G#cr}EU4-y)kCnh&=Z)rh377e-k7wAEmNC`r&$&;8KFZ$K$ITf zj8N9}D(L-K7aiAi=z%HS;wF}cx>B=+CFCw6sWe2A6$bKI2GEF_PuX%qrXbA`UV#|= z^lAXhujeA7fm7iQtKKApIGdeF5)Q62vJIDa+#=tA0Vn3qVIENS3}VB$j-rVlrt<;# zzfu44?&o`#S@CDG8qm9Zqw|44L22kFF5KR@uR#5nTCPK!x2K4~ZMwxjtkZdcI=v78Chh?+8Z55ay%_W2^%A~@ zRyDlE7KReV!ua|SlM$>gsftH4xW#iuj^KweG5KB(SAdt0U7JFj827(XYeb8*?x6tZ zB1Rz;&%Q*UwV|yJ83YHO5in}TrXv>fyBtUR+G8E_mUW-VM%pmm5@G_p2wqZe(GD-LpPo<>1{NRy zG5fDEbilb}JANdhCa==YuAE|hj?L_ax8kG+t?C@ev+C0r+yRRo<7)0uR;KhjM3r&a zf;-a?TQ|IvP}Q0uYv95j!(ccjhXROzB{#x6-4B4ZKLI!aU#aA88BL$mM;(j;4$y>M zD1=V$7lC*d`YQ3xohyV7uO%Si@?aAH364xI{~N0UMUD1u-YIb3y107ld)T4=XT3P# zIy5+!jjd^rDa6W_;3av!QTR=>m1W-p*GqOic`=9{$(qG*?2=d(?JsN^)>WM)>bMNS zt6*}i9dVO&1U4a0HytzFe;b_QD^XeiBMMZAQZz8S?i)OdT?Rah{Js0qM zp94B?y#czRgG3d&Q75iMgxO$nT=Y`xHoq;YlnFVq(dx>)=zZqd?f%O%a5;D2%vRrR z9a0+;nRc1bTTQ++eBiSkr*QlNMe_eMn1CF2LGS?ySLs_!hIyVTPi|*J?o*c}z9t|f z`XPV~Y3%G*2B__cawN1^a*f%2!Yj>r0UlrF57sygHos*X7BpJefVdpotUK%v7CFql zr@1}i-d1nhr(A?VY+J2azbLqR4Kr2tI}8z_{f^z^bxNE0%G%_h2}xy{ob`q!*a65e zqD_dME$L8fmOdORDwdI;kl)a>swEtrnc!G+B}hA)wx0M#BUx`mSB^Am2NVw7apkqLoi%XBPk+0(Tv0RL0thRWY~E6aaJy z+;yc>8*eLGT)Omdqcx<-1K@p3M|s<(2sh5HB7h`#ED1aCr4zugLamd!5WGTPRgf*W z`x_r)d{enm0!0;<`Ug&$a!w&qHIzAhdgfxH)u3@NPbP(3apERC#@+xr1p)~T!50m2 za$H-nin2!YWB4dAcM~}So=zv*#CyHRWyW!lLi&Y=iK6w}(y5KhGOnn`XC>J(H`e&)sr!@%;{TTQ&Hz8_PitpX8-B^jZ2jKKZquqE2)MIizp1X(Pb;3eso`-1s! 
z5|9%_{PnrkNG}A|pche8_ZR|btt^S>2B9k;PpAFvgzQ-GmjD<<62Jf7ysC6kHQdddoU4Fu;>;iCA%$F zR&QdTa})(>8?vxZmWq0Hvh6cf%9uZ(V-M6!ptte}D=-saXjj)Rt~dnLAsXAnlT65L z^cbO$AGGd5MoRW}P*JJ(Ve8sjtqYvTU{O|9U=;z88S+mO@7bD;8(-qITq_)YIjnKM zEAUKQ2M&Z^Qj;}1>dK(IZk-x7XaI@ax!^v0njIL*8hu3N5Q2t$5T(wykgGt>?oL9 z8}f7QE`xBzU0+ugg#`LSTBw3f@#Rk`xCxSZRcr$+8$Xovn`(r!0!DJ>XnQbo)XAjg zmk$HzN+<7o$Sa){dCQtjG^SMM<79zy&^Z_HG+`Y+yP=)Q{`YdLzLt1pX6qE|YAI4} z?G8BIsdWrek1Mhl7R`cf^oaNEO65N$7{(kkVA4apbaD@JRUw0#eFX;ABw>Bt}#&qIL1^}m;?tE^`3S)SE9k^50F(cb#> zJ}(WqmzQiD$v$0DmN1M!TUv!}xoGg&jF2%z_SCJ=AyxyDr1_>IFSPiN{Tn!V-aoUh z6_2O9L_mmY6YxO$sTz+lbdTeey}BTYqD$8%c3P%8`!K%VpwTzzU?yT%bd4Bbjg5M#KgqK9o7_epAPj? zB72LES6$=0QzfneA2EmU`vCRYTGho06yNPiFrqWZo-Hg3W`$QeL}c6CvZ()JGmMDb z0Iv!$Y~WsK&51U?5B>+y%m;0yBs+W>X|1hf#=x;XaQD7$^;g$m4#}NJ^G+-24-|O< z^Fl#t9ou=7{t3C5#)3>c^`!1XbTP_>&XQtpkM5&WQm5$I_y5pU;RT!j{!(EE2|-d zX&!f`->~3tx!-lk2E^5K)z7;qs}#Z%D4Q{xF5lH9MtYlK%Y81GBadcxL_k1xhSfbc z_37Q#kotw)+qW##Cj=#m(jr+}F>VVIFHEc|)dnaNV9}-87SIpB7v5G;I4z$A4niNJ z@#bTFe1TN{LUvi-8F;?EvwsD>q{p$+@KGAB&)F8D2Jj>b-4BF=E6UGp8gB&OW-WWy z7NZoiR{@neTmWYAsB>2aaZ z;ZsKEVLqSPJe(YAq(4`j4AEt*WV%~hlgHjLGeuJyY3r){-#63@ehj>39$*uUyeH>o zHnwi;uZOSM2DlDypDagki&k9+By~!@#Y|Qm#poo49LSjNVCgI}68RgJKa0(P@l{pE zk1vt50K$2c`F0pMS4C`ECAO7#P12^*WPXK__$5Vz`AR6#SfLC9i7qsh%k8LQ9Z=@T z@Lz(;e|DIVaz5Pdm=UQRrwZg@)<`~b)Bf#9KRF!>P3wPOD@JOC$MclqTtb&m7h(*F zZtc%GW>&|Yl#R#}WK6(Hn)jMhOqM$JVCb9st*E6m>5c^hQBD(U*km`6=9Xuoc3+!? zkj0kSbj&eL%i`ImCmOdtafS2W9+WJyy4jm-90Hzgy56bPn(fsq0&Vy&QpTu{2%Q*3 z$oDA~{RNXSf+O3`nJG|%U5kJ4#$+a*YsHk0))}Dq_3-*vc`ww5D6OuxA#K>pK zj7aV^!%}DkX8-Df)GgB%Uj@<3Z$8KHQ6X_kQVwIyR3!62udH2#Hr64EZgS3hDEhry zMHgjSmoW2e`xhbTF9cyk<LeJItb?|F-fAGTUSjp~tlFt54&bQ}i<1dCD~(M=^*r8)Hq11R&W7?()t(N{QUk#|0&DW_S>hLYp+F|8;(v2}f9+8sdGnnfci#4$Ru%6$sntV~A@Kvfaq5l@(^_fyAg5`>% zxpB`gt>jWoo2s-0iM`55cUr@V$ZEpw?vIF1lJLib89d=8o>yw;-ZM}hY1n~5G|5Hp z9pZ;`abjS3N7&^F<@IA!2zV+bcyT^0-SB&UgL_3S@j^MseN#KBN=5IAsKjiN zKm^P$8{~BRcSgf4-~GP< zMxp@=4F2uc!*_|>eUeI{fMw;Vt%VxyId!8~IepX`$Tq{D=eROUrtg@@53fVvrwyTi zK*d0_Lg)j1iB|f&)X<*aBj{w!(hMC5kxTFZnOH_4BT`ehqHLWdaXKA;4Y4eho4Z1= z5FbKajkRZ4&8?XgGeH{bKTq%T!LhY3W%}=M7^cm%nf$P&b~o-Pu`LpDz9pZyOMZhb zyMI6f4C}79c*qqD>N=Xz>O&pSJ8eKFb?~no$|}g|yF(`Z|ZTY+H)#5tWWy89q`+^J}?9V=z6(gF{lxncjb+ z9_0z5jo9r}WHDeUA{EF&WQG<@Bu}V9@KDFPo~iGyp|3z=*yB*L30r%g*9@69joFcX z;N^S%p2@j$>5Dbj55ixWWpL!)sM=ti7ch^}U1RY%gw@f18Zk_2+L0edFP}+2=t3{K z8oI?CQs$v#I&MXd{NBcbJwVjq731nB(Nhf{8xV-Hu=qzS&r*tspcX(er^`pe zP3Q1h0_3LA(MS%xQc4&Oz^D zMRy!YHa>yH8LV6Y^+;?{s7bg|Iy)R&j>1GGj-sWj%V;?`kF zU;b*bS`d#|H?Jia`jV)23V0iPyfWyS3rWx1#L5bm}87>@gQEHR? 
zj{Y29jh8w^ayXk;+5CzdB7BQX(u|GgSqvzHCj>A`(lCte05@&9F2q8u+M60gR*a;g zq}gMPq&e?GJv0l`;5MPoGP6L_)qd9f3*|D-SCRFnG1Vvgwv;^PSHf`mdb$n7r?5o0 zP-WRL4X)AfK&HEY{Hhey{Hg6qZxS`*k6vak6m2F{^)s6nWpI)+Ip6G;W6g%1$aNE~ zd6n^BDyRlS6TFeBk4J$> z!_^npjb)dd&^Hy!;SM8JM-&etdy;Zz-OeUix_DP;cMuR1?8%)x<I&fB9+BpbVz8+8oPgK<(4VKPiOf?p&T;U5JQUs=i5Kh z9WzHCAkeNgF#w$QOHib4)APhmv-0&AT+J(`qI~vZXuqG>?T;jvDnn(*4hR7%nHcA< zEgo#x+ynYWs6L+urNQ{@rCs=`15#j4l@m7H4HCAUq!zU-E&2!)3Xipdh?a?(-Mi92cbPq;398!Y^Y6pQr_2^um@*Xk;uAcH~bf`9^nX z@XG07)zXQ5J*z2+d~Cm!m~pI{@mLPu8UYRcHn#njm!AabNRVTvtUf*`%RNZKE#4Pj z9E_OSPBgLuqxRf6=Py=sfixq9T8V2fS=>F<-rqENJ}0A&-eZu$Sxqoe$)=EJ=6_aQf(cF4o)qOEV9 z==?#avjDf)m!r=65}|RBQfC|+N4YP9VdWLDOThgITcgr8%m<0t>g4Xt zI0zo55~qN%8c`u9%dQacsd!QLVh6r~)`B(TnnG1&SvV6!*hJwoIMZ=(1tWi(lW|uM z-=y_E&I$;U@K>Y0e$ayw49-{dS+koOB@yB%?R`=*1ut72mr>L`Z!lPqrN;nPG7}Z; zpnZ=Q7v{s$tN+^M&NWK)>}duz(npSE@?9n5ximqjqGZ#tleFcQ!jfE za}0?+e{3-;Kl$?IBfbiB(GRqI_!b!Ic#k=?Wa4R_C9oBTJU7Qf2NODgTvF&|`~Vwu z0m>K@D>jKvjGZf;AP0njyb5WWg@Qhc;AU=L3h3zo^$*H z+*;G%6HB^|mXuIOd??l6(}W2ycMui@|I6^*nOZ zau?HFp5Aey_=1DL(Fn6ctoqS&sGjfXF7mXobm5${4sx(Q{+Il$d!+hxUzs1#aK8!V(Pa=*b2{Vf~gFr`%C}_xIoT=CqpiA-k z$~A3}(yF?nXIshJUuiqGg4RoiSah$Iu>^iEzXS7t%C*EdHGZO3ShVQ>?7<#d+xxPi zVoH?V9II{NQFDpz&LA$#753_2$%dFtUEepwQ0V=y#S{pL2gRm;?c#wWq}P<=)KRMu z^5tNKKgbqvFx`~M@0Un{9j+DgQ0lm_!HIzbS2Xp7wXiGKVG>)FPZSA%+1m464RWlq z+SgE;ZiJrgc&*M4o4ij6136YU8QyJ%j$7R+0%X$nZLo7pM@DREkJ3WEV$@L;vxW*Z zE+B#!Xi6OAT>T@9dAa6{&WD>$z!ORO@*Dv5ax-ZhcYyCHn#7MHt}tn9(zfja zL)Jlz0BE`tya>sJXG8DU&}bIlvTu@ zuiQ!#5MZA{@GuAG!S2juXwdwMU?fo}n?Rz=qI+A_ktLRCVYKr*1N~e@HgzG97(QUamq#-suRu~6{QnBV zN>>d(yM$`DTfU8TADP(T0`<6zIbmZob}z!XLWJ>(=|)M-D@@dfk(;Y>d!T39Z4^h( zk&uZ<#9-WpjYOwq?P*P=u|IX`Fn52!ivQ9SwR9s!1L0oM9aOy++bv0+jsMKJ`K>u4 z2beQV>{j*;@}|5|sMk=*cxz^j&^cnbBr#T#PZQo%(ThA1=Of|`wE(eR@u5%#bzX!sFk0w@^u4`tU7gCE&Xra^dm12dNc>vKc@SpBD?Kcw%)~Id$s=9 z2huE0dzUn>u{`URV)X1$<^c0v0CuuoCu~ed6>X|2uGhn1qRocb-c}azPbFh;Xf6Z=IKM1hv`h%T)uLj< z>>Ah8BcoN9@=vK@=MSLVuzL~0!x0b_z#lR2=bQC4jLjvTDnO~-lQi1~QL(B!^uHk(|y$@vCSo%S8OHv1SSO)QOIe2{sHbCW&5}*i2EG0W ztW?g{)bF}35#COYnG4cz-kV7|ST&O>;E=FMDD2^I3j~v$T(G3K(uU!df!myrBY=t~ zMNe^%Ydw+i)U-&#YG*o<6e6leF>Z9IbR1eJvW{bs1Ui&TZNPJ_Tg5kdEcI*v-&LB$ zY`fAlUq}a4{>O1(RS*YxkJQ77kr8Q;ll{p;RSx0@Be=~db@gV3a`*y$B@=;_RfuZV zpc!+0)~~8PQ$M9(a#_d>`k2GPpm%o(@4Gf%gKZc+Q|Wdzt{pngn3&nP4nNvzva=HIgmL8%10vu01pD?CKyblR> z8QCRldWQ06D1IMqCA>6ywHP~kNpD5#%Dpx zcPX;&p76#}@X`ZB!N|7Tbbd&@(v1LmYDg>Bv~h#0s^K)rNwX}G!e_IC(Bx|LLnz4o z$^b~*)O<1J?cjaoG*F0JF-vZXXt! 
zub<%w>9_-p~xC3Ug5mKcC)3yf5m94 z1qjIz+PhB}i0G)oESpUq&jdVuSBJ*IEErl&vGg1;DjioY;B7*e!s&U)8*{Uje2OBQqI<5#UVkcJY-r3A zQcOm53lLQ0ah{nZI^oHH;_25`fDp4Kag3q48eH@T z))hJF#uzCtLQk99cEc0cR3SWII+*p&c0gMf6x+HYfV4Dm!e21}s)g|SfQbXrungDW z8Qo?*H?oLMhWfLn-}~1!)W|kmhCT*@chdej{+5u1C&3V3m+6cbq;N#U)J@2Yt2op> z@Zj!}TM(XjU|)yRZS-%qM{XwH0p>x1w@q$(qpQ;Tx?5kPRD-M;o}OA)sddhpIzc#t zFW!Q>_L=KK3=(81LNDQ(xIo;B#C$7vru@PnWu4&@49CbynYNu=cS zAgG`cHHOz=kdS`-?OhVi&)LQn1`6oyVs3$he?e_9^07kkayZb5E5T(#tC<$$Y&`N5XaR=&xxSojDFS@GUJ z6;$^kYC18_u%L6W{&(6Wo66c`o-5x0U1|CAA|ikw zUsQ*cXN>7O4X$F$0h& z*el%B>sE%CEU=_kO=@=W8PYK zeN2RiNb3pt?IrISTOR=|8EH5cD7?_0GMDpV0I`-zN|wrD$0NpINM{>ya9D*`O!dKl zVyvBY3JQx>Xuzr*7=UPENu;R0vV5OKPd#r_QQ^3k_ET)vJff^F(S5;PeGg|}ZdPeR z?CT$*Iaaa0ot@#3Kht6zUTy71RpBw`MVjkT;&jSUf4s0s%s{03^yB=;Aa8)mcY9Kv zqbLy6t%J$plb6fGP(V5YI6;A(g#t8f_@) z%Sa2uRBvlrueiIRjs+A@VV?%?R@xo?+`MPf+0*0Rl`ym@0#DxOMx-b24Z5I7zu9JI zI1C%zb-ZtLBYwow5_eONBpPd(lF9}mx!W*>e#SF8+#^ImobRR{`QVe*h>O)~TbY@} zcLbQw6;XZ}EZ;syt}9kj!mVOT%@g4A-ZMM`Szp*;Ofy)11;PxE!{eqs)B#blH!m#p za&jcfIxySN6CwVFX?IU$y2=5Bp3}Q3ifxE3JVKgHSPs(dELKm77XUE_Pm<_F6`3Eo zQW5J0*)Nplg4|i&S)k$6yQ`T~9w-7FccR*KorvU zHu<25s+3TOkd%O#3i-5v>apS*_1QBL(caTD-uRc?(z@$npejiUFHvqo8#Qx|8--*Tu}zb5Zg#|bjw-EExElk|~$=n}VM@u?XQ=CvxZP(`D(+T`=gfD%C? zZiup#`|XwDXGow4RJhRD=aYC{Uzz!@bjh1_@ayY#29t}E_!F(I+fpvlt9-W{mT}SXkNiI`p*jARQ0~4qzmbtLURn`wIpq-z2n4Ut4`- z-{%CgzJ+*Lr7BL5 z^4cHNTiIQeiRZ!Z)%mNrfY;q$GfA#9=`SL3a{@zQnF0Y!)ua_o{#9g(4u)u6TF@#y zCS|i}5P*2VKdsMDXd8|cw7i9AmQ02&k{?JUXkf&Tf~ln1<@IP0tqld~Q_vT8 zR(@2nZ-juI$6Z2dKum>gX3@;xYBT(3A#O zbGaKg^90b6pgF_<-jgAT2XL=Ds?vQ*G~`r^D`9S==C4k9i;$g-f30F(9t38$KsV;O zBgYLBj#?R7AEZcOsqBO(*k2K5;*gOvZd9ZR;&_wZ&xwp;?K@$Gqy7PBLYZv#e}=+# z(HH|&0C5KsA1-=w@+wGvmkMP)Y88M^a$}YPyDt-5|izp*MzcCT)#F|4d zS1ZW>@vuT^q$}~Vj^G?7203xi1lE*vf^a_;q}T%9HrY>4a_?aZHUtcAh>?z}=buno;OB88NJWF!W68UlkLz_15hSzw1H5AZO95N+;;I02fHhK zVA8hE0<|TU<_+|8J@e$PB_+1eC2h+wD%jghQeg-s05u%kvC+8|jIFs_g^O69a;<$2<3&aoHAE^rXE&#Yz zAU_b_7GVa4JmGcz@$c-5zlqCrXHOxM!7Ijzo}+Nq@dJMCT0s~>x4HmH1eBtT0b}Ho zNZNNI+JU+Ug2r2E=B#%#kzjoOy7LQI%jNnZC&HBC4z*cUU=dw*-9hi>h;qKEYXN|B zmb+M)fJSAD3Qb3TT~Vf`|4a3Pv`trzsn)^hnTku$NH0xJr3q<{QGb#B-7uyap@WkJ zk#W?J4T5B}Um_Sd_P1KhvXZ(n05ZOy0RHJif2`Nk)QnU6`0>eBL=zk(wkwgo^m9!z zY%E0JmgR;&b-FS8?oWh{D2Yg04jOn^qc?v3HVZF zY&Znoe-8jWBFkPV59zbxofd6*rNV9Zht$^)0hhamGi8m?NX`q^fPx^~f@0|(SmjY| z?}FD@($N4@u&Ivl;92-7z^oqo2rMZlbL5V)E>QY?cg3ZMa980rb1NYL0I|TE5306R zvb6vMFRnyh#Fm#5MS$Iee!YIpS%4Xhu@vg)Uyiu6GL+Zo;ph0-3B@uHdrFnYVCsdd zHO3k$ORoWj;z6p>p664((x+a*BfuK?@*L zkm%j5f!Wg$;kGklF!Icw*EW#-=Py(#q|6N;N9&0k*6nKby#b&4Yx|6)lE9(^PXe$> ze!Yq>t+STzMF?NWGOA%UxSs6NbqzK!xeo-j$Yd7-0B5iOTMu*ev;%?w5kQ5J4`F$C zRGc%V3-=EF<3!M#cA_?2X7v$YNk3=#o!0afgmfnIa~0xt(} z2KPkBpi_)A9F&l6fc#93<%`Cx(|*NAbIHlf=d3V(k8GXg)L=yvL0GrnaCT30Z;xYt z`SIRd`>*d|M_toevoagyWcn^gWaJPDR0yzkH1P>ie(SI%NM=M5n2Gw9#!Qq{2ELDm z1RW2;L6uXN5R#-lh{W_`zewVLtPd~Yfx#}I4r*#g2SMH&+tu{k6TrLBPbdE~unW(b z&jHS(e+B?CV-D+{DW1yLa1Z}uL{#FUJYf2*3iP(WH9|gu#Xz}thW4Wqo(%Q?LADr< zTf$2Q8Z6lLXX{NRa?wubQyp2rz-x$gK4@aI$051|B6uB@j8kn`*TtX^s4gv>&!5G= z(y{H19ONe!Hhr|y!+kKhpjx*f=+p)L$f|i~KK;ENLm3lJr`v)D3A!!{K@(2;YPxI;~P?9&a)B*{hU9Vs|+Y@{AdV)6%7I`BvA<|G< zJhB;GP3{tH1(T4s-eI|VZ;N9zS=$5Tp_0e69*w#@YJUnunZI zf^%w);bP(Ot@C}>;73lpg$&|8#Peu>JFJKsKhwa@l-NVDxIU8@YxCkC`Ej6&F{*|H z+Qxd&ZeIVgm16EutWdc$__xzE)^gR|-<|(}5MT>&*P>q=1 z9W>2Wr`(B0g)om<0B&*DTrxGns(5V0Mi6O*=5+aYh0h44DU){xFZEs?QSwEI7)ggfJcJ;97I zivjA3L{9MxKSH70P&fW)@QY&DPx^pbj*FlI8UQ#|btEJ=jv)~gFd~TZmsx2%xPZs9 zD9V$`xsk(&94f%93V-;VN#0cG~!5s;d3@eB~Y-AYD z#A6QdckzZwkaIgdKTr3iGwX5J-)%(H;gp-PJW|0qCmP9i)?u`qoi%_#Knf0rQ?H4} 
zf>6`fUFl$|0X!>NQ3-6qukoR)Q4C|$G0t1C(D4Kh-Sj|2(*4GrAkJmLSM06>T%so3j-uUacG+6|w`rrW4 zNOzC}Koo&nK~psWU^i}D@n8r17l6uq0+gCIEjyrW_;nR{+J1mI5G%xkcQqEM=y%w& zcfPNnN~pib9rvPPGMevV9#23>@Btanj@Lg9r!%ag7`>7o`UU9)`2%t)VG7yBJqu-=$R zT@K9lDjGFRpuim8>)$+ z$9b*7GbbcyEfyAXx~DwKNRO2x2=#G+p<+RO)5j&SI=S0orci0{&Nw)kxJA}_AFU# z4B?sl>Ue#4(OzoMx{xLD>ie&jj7+VxKnQ!H*g1HpX);Q@F|~Uy8H;K)BdRrOla=q)oY_wTkFo;L(;@>qth$F(cI8%r6v~}=lw%K0=PjfT8 zp(^j?3d;`oAkYMO55)rg3PA88PptC^BsKm+u?A;tM7-p(#H0idB(nptQhf;#eZnYX zz)zxlkY+sis{4JB6mNT*g--iOy;gLruXA}$3@5#9_m>Kh)7q%0Q4x-jgv?utute!;3fcW*q(2GH!L=F_P}D`>UZ==IM)OEd!OyTY8nsL+2mjo^~CNF*X^itVp! zv1|6Ke0i_*B0mK)Ylp!0?f}<3-@ug8MHH-)n#oH= z$5?QMA`jx7SGPqU?+_X=FtDx93(#6-0TZN5+&}Bqu_K zg<5IX$g|Qzb{2e?*aptyyVP%>!-5$YY{88ecn)dK=~adc@Mzus5+>iSIh8%rhEvoa z#V^lG;Z>vkk$x&tafuGCX#RT{)vreVdG>0k+b1uLA?_Z0lXwCz2R(G!N6q#T!6P`G z8GdI2e5+c%ZUy^HXj&*uU!2qq(ID|`65I`Y*7UVAsnE^dY@JyeGs`WoT-I65ZS3@_ zPE9wpfN}!UiJykt0GXTTEmBM0_l*)!0PigKT!u8p@G_gQt2+S;wJmmVjIlbg{N-vp zyAPo-@;dYgen@NMo*cNvK6>2OmNs0%_s?~zy+Wa|Vfz-N4La7wxwm*oWP7(yUBX&L z&YYYqL)bJ3uUX}Tx-tRXeOMN61C36ze3#}-2$zqzF*yo#z4YQtb3BztFPu7BtDr-4 z@+PQ4HL~-ntW#sTgWufThu^*`0-_23l{pP>o~N|N2`8)R}YU( z(Ru#}n{ASUhSBxXe&;+uHA8~}3fCxKuYe}sGpJgfqLM_Y!1c>GR1B@SjzqzIFAss2 z3>Z=m%?|X^S=ciX#d2mf71BQxBcQGRlyIC;EO$~Bjj@;|J;C(et+GA~jzNBU2n>s; zQ%+7m5LiVJ0pVCA&ueWf5V`9y_;AULQp>%@k5H=hsnAMCtAS2{2Gs4XN09~SMP7Ae zkVzBK+514G6_1=4KLo7XvLz^DtMB8gb9i?%<7@QW_MBR0MUT?N&%t9c*66gmY(#X1h?qp9n>z+`+v0$Xh~XYfCYx1A zjA%8@M+qq8&lE4bh5+mbJr}MDBH8NhH&zO%(US`r{Y!3`M6GlPSVF|3k0dy)!%MGm zld1fCjNj`Lz*}Pn@Y}qeEL%VtBlZV#pv;on6zPE6%IiU(ksxTgoyr1);pg`6vQ_tj zT^T%xH_ApqB$r|=1(&W!L-GG{CiCq&r`1zr5-a3~=`-nzE|skdH7kZ1O~f%ag!{w6 zSPB)W&0c0BsFlueM^Ne{kamkHwP5?pu*iL{So1*Lj*BjPx?kIh#fB2PIf%m4gFV^6 zFj%tYXUFn!sj(rJ<+K22av~xl{1cNY>y!-VCPs^bQv|$MA$jW-dpvuhB%X54DZi-d8u35%3_TX@j~j0oG^{}3V9lajr3f8VmalHX;AS)d0ZBoHevNi>i#K%x9o<~G*6N+aJbyjwYp zLC@y|EXBCYd9SSdM!T62UA)QAwPXx7Xus4=+6Td2yoo7synrhN8}XZ=kJ%H2K_Po% zoai24W9?2EREW0O(K8NqYj3JP&-WMLWf)2XSC7Z!BAcEX>8}K=lRqD-bL{H zXGb@ab?@PqwqAM`+a@bKeI)!G=eFS&J8IUK{6j#CKEFs&BmgvNwg0C8OGEZx>s@Bo zwN>C4(wVaI()sJ&(U?@Jm!vN%1!5xPoO_8Bm<}lSng;?Fk>H}n;9}rjW4PykZw&0m zFN^LOsp;>Qc~!d6Idjk?xCCIdk-*@S0?nOY_e>VA#Iy&q^$GM#e=e zR*r63Gslpvgqe$ByFWRecRVp(d0@LEajxsCU)#n(MLRZ!8zRqRvI#tAelB8iYJ+iW z)8rgvQ+8ntQYpmf#*b@Z&|d9qC;oqi&8T8Q8goCyW*A)}zp-7Mcmiji8fSk%TJ0h5k7wvWrn2HNXAyTBQ1gYb=L zVvn4Qsu2Adm+oN)<=g|JZtwUZh~A4+{i*xq@#A(lY_?^(R}1-8!RNC4U6_;d$w>>0 zbLjXJnPLT0nA?GKZHERai>5=z+@iduU6al4{|giKOiVN7X0x#sus`CEX(rY@6Mjri z-Cf!xs-)n$uxPAoz#E6|vAw880NOq64QEcjIwpE~q)f0n)pl;=Xk3KGOj||%5-+Cj zFRziZGS+Hd7y-Mkl}U{H)e=B=9rPGTF2m6@7jbL$hy;z zJP3oNYzPy-e;>AfX}9O#So?&XH5TeIvNUfnEJ(0S>A_J-kU?@E!Xr3oA(6+$jHEM2 zbL>W3zF$0jA65l~DxW8Qim$B0s_F;3a6{L;ho_0B%N$r7miRUW;USpUkdc!gJbNfH z69K)(DdX7TiXn7<_4ZFdA$>O5A*7yHThS+LV|Vkr;vY~hOkq3%a=zxkcEBG+> zxDxI2kcr8!3hfbN-qbFnLCltCL(wObx+=*C`RPkTzPb4){_R9|UV;ZW_Km-$DxBL`sDh!;@n++BfAZnG5>H%G=VrX1z&?DZNIGZIkR7HE-WRgu;33$@>IxhW^$ zjU;FYIgJ$_?gYmgzf9B#d`71iz|{m4OJ8FrMeM-5Y8n_J6n*kBq%ASwFYENayR;7jPS zD$@p}RXFe}p4-YJ{;5m2Do-C3yHBxM_LdggzIDt^%Cvo)eMO@_nu#^ZbNvYdbD*^0 zMABP6`~wyM_lY#==T~j5ipqpwwr?1r#ZST&F$|W8Guy_A9umZ4cMb5^wJhF1uC!wA zjf9?F@Yt(z-c6?X#~jf#QTHeaZB{B+0(G3>{hJ(#Wq7&5p^Lm)@Fly5?Qm(5y5B4) zf9l3&mWUj@Yy%*)nk-WRTtZE7X9pI8vnc4iUZ^z87WKyFB_K!B*+cuj8p(`y^H#41q0Z|`yEUP5=kwwsSws-ID`Mh`-YT7-s4HR;{rf8m#h zP@xUf%$MXM^Oz1K8C_MAb~F3K^Q)In?_s@@CX-`63`2AKX~kYi+zcPys)cx+mA@+W z@{bUXFcCz$L}KF5*y2LQF(-q2d;j-yv+!GV&a19jezTnA2Y*m z=2JrvmhJ%if|2i!&$V7ys0~Cq%oq!GQ_pI;jvLlfbryVD|33EFowf?RG^oh*rGZFh z2-H>xjz*kk5HX94MP|g+dxJbv0vCcg%2|Zya*D27I}?T=weM{)(K0Fdk(zTm^&r(0 
zBAMzfaC`*_znUixjBvRMpHad1f18ZNlUEY^e7S~7#}Wsm_4!7`gw%~6pG;~qg5>vb z(^fx#L$2GrO77J~$M_ZH`I)P9tN=f`@gdqy55Fe)TfZZbGNLjAuB2uSISuJN%ZvR6 zLA1k)K6lP$!R9q8@qz;~3MO?R4ROnNp&!or$Xu7PyS{R9zx$2kKJyH3ofgI9F1#RL zZ#1nSD5#ue`L%!oN+UjZ}rqM9qnH35CH(pOo@xtH@s zp%zqxDm>S&PDKfFi>BocYPJS*fgoVe(ovrosVkuXf6j;P1+7Ewvl`?BKna+o`8UZO z%pZp{6}qkS(zCY;SoB9FYdH!$&t&? zP!v-{q;)2#mWRl=bk>{57&6*L4-->M4D~~gaub`Ja;vtmqsw4!LS;_wbv^>iS|8X4 zV+~J{wT^tL=gqmU97V`f904;8hE2Y$p7|d5`i~20l%YvyiK-oS0fPV{1mG zX4wL(K7=WTbvv4xyq*8amjQGuTb`s{{?YW_GXj)H4kTQ|t*iJ2$?SNM5H1#0F7%q| z-tI14s|QixkBUj$omnCo1%{Zjj`*MYc{DdJl@9%Xgt_al;1U5ozflZ`_ic0c@FP3~ zj87t66`3-UVN?G8FdN%9NT$=-G#o6fx|q>QU`zbD*`0h_lPg$TJJMPk?*v>Nr{oI{ zREOELYp0S(M?NhGGMf^n4A4v*mq4rH)w#8xeSoidJN2O^#9~Pq5T_=;2&9$d>UhU$ zXKXEXsXjg9k;Y`nWCy0>1>tU9h37^1@#pQZmzMDTgT6T;?r{qoug!^70l_vu>pxMO zIMX@4o>`{X1fVul>|*J~F#P{U9Bu5&M~~M|7rW{eu=`wl(`^vA+NgJm80DIG3w^3> zg1m8VMF?74p06Jp(;%3EF0I0%T??_u=BtmJyYtFtKp_~a0OMhE+>u0ew%K88sd?|H zsbd?ncCf4F&_*fAH*X`n5Xv1HP|Xc#yw5aBa?AMdzjJDqmFTZHr2?+_B(S@Ha|$O1$Wh37$_^);tdQR>kD8Gv4?&4k zAQ#FtwNisO4*RoD6V^NYMPmbJo`NgNMPD*e$-cTo?}S=KWXUL8^_`exq0gWopB#rX zo9l{T#J&-(8Cmpbh?%_jOXSDw%q2k8dlM^{apem!6gYpy30H%_5 z{RW6Pr%TC#bsoFNF^_ZAtN=gPGQ-IH+s-*R)C$K8s=CR2BB|ghf|E=4fEbS0-&XDz zh+m?4W}@HOd8WbPYkX-WtIbh%Fa=E^@+QsmEi}0k0Ww^{HD10oKWk1)OWh10ooehhVuF((eR)S)sVgt#nw886E+y(OOP;Bo-1DX zwlZ0J*43%vZC_OcHvsjMDcDFE!D>AX6vLkIJ9yWiEFA(RKypaNh@c3Om)tXN?9oy; z|9+hBxsOj2KD5wZvo4pHg{q}}=C*-F>5BxHGE*KpYz!Oryc}lS(Kif%By#z2fC!~U zvN|TzQ=-1^eyh-^?Sx)dvodi)*VsyN8w|$hid#Y=x;z1S6)*l!2rP8l8!)->YNUF5 zrgH5=HKd3I=(}|sMqqx0;En9M?J;j5VxT6xq&>yfafUro3D;%La-&{Cxv86Zku(<43|!OTGahY=vw&od-{?lj2ze6^D2H=jknf!8$0=ll+Na zea#ydAQNC)DG0~`R$cYn@`u;yQ~KI#9(AKx{!Rt1KA}GXU;&Rm-ra+K4_uh;J;(vc zEh;^A%Mq{qK#)jd)^P0}9Mc3OYzAzGQGvOvN-0iJfWrEEU>BQo#px;y9Jg1ME2r_% z#<>e72w{G*fXbMt(mfyRZVB)0nhE6GeeKL=E<1HrHR3F%k5*%e4Uv7h$@+?Uy|b~g z&~}H(>J;iBUdI<|86fX^Xwi&k59m(F$!LzRH676*c))ltWV&oOy-G)?JeHR<@Su)CUbl~r{tmf zX7W?xCqoWZAP~hT`w+;AeuL@11JRaauTKi;sO?LRpw`hhXmr7I+qB)vf0;h%V5yHk z^7y&?h`KvQyaH->1Lo_4x}Cxww2cNK^84JO@Dzg?FB zsMl#VW=qL;%9c>yQ7IU%FoD}}ebnxH4O;lrA3nZdC5KY1l)C|;<7j+?@3 z7-c(lUsi)|ehGXyY^Qx7FJm<*NsLs?!{>}8W8e&wuKZI^bEJRU4+NDK6bF3S9~wTz zRg{=*)*CDyGKf@Qzv(RyEL(}jq-maV{V%2(++|u1khMXH(#<0ndYUF5gP&=q+Dl#Y*{y(tgVCYc(rjID{Nm@wV-8occ(dN?+lZ}k^|Y4=XhCt{KKFv1 z7*f-o&&bT%gK{h^hbv9k*rCq|-ujetmk4AVt2r_JNyU%^Z=->NGuPVPV9Dr>gyX8_3HO*PGZ6W-D1t zW?eW?EhUr6mw-=G!;RWYlH}BWWZIQy=!0J(0+8DN&gb>LOYB<~fTK~)ffkJIZ5h$RE(yT)hm-`g5s8Wf zp9K~h`=2)aVTBXKjkTJZ_(XFTAbE8ud95Ta+5-1JD9HvfHs??Y{E6vNq^pJ{*)418 zeRY~R5HWs=j&^c&y^G&?S1_*aC_e;u8UHQGPY(S!;P_jA#<^=qN2@t12#VYzB5(`c z5VkA%WhG>q#RDx@u-TJ9AUplx#J$Ho6hZ`F>%lP3 z8ZZ@X`^WYxaQCj?pnYxVpKQe%(Xk(5rb@Pn$_RKFivExE#rR?N2qPO zN!n%hC~g90c6t4#pG;SNV39k5xGj9{`dmF#083>3PO%G*=TRix)71mX>SVxsw3ZMN;c}hE- zEA$s=tLa!y0XG>=G204v>=v@uD)iA}(#Eu%hyofvX3^ZlFo&V%=q}vu=+g>_zNR9l z#$+O=+`toZ0l5%dU=2r29yF)j`d}VBkcw=+i-En~P*}|#weRhEo}~%UA|f1J zn_Ylx+3^hKv@GBp!>d>Ae`Al0cFEcR?p?zMC3x!WOJP%OH8KMd@l-~8y+ zR8o8B)%4R^4QJoM>5Uo1U$QhJ)ZP41Ulqh(Jf4qp*?+cE%H*{#t+g(n*Y*2q4*A{& zs?MqF1e_PSi+J=z@7P5qY9tMu;7haT7hvu?NgZDnkAc6<_%Am4wKYDe7OfcDzVTJh zn#)=5Hfk}WaDPM>dB5-I4Ko<_3tH8+gAfa0#Px~dd}BPP35aBl?_Kgn)SEaUEWC-H z1SaL17|Zfr4Z*_9)E#lULYFQp_Myk-?DTJJQ7bI;8TVy)fnikPf9e6~7zs&jDWvNB zntrRm#$ZjQ&Uz{IO6)yL(d_#lm4rqyxN7|iIcyE{5~1GhW~wZWZ1B@3{eAtIkv&r# zr>8orG2%t5SharjU4nL+KiK^t?GDWmVP7kGt85NJ4|xQe7Q@ZnVmBys_XmDa6| z8JF4+*m_yBfROSlLE0;hXBX1k3#y|Z^kFcO^4WnZ>Se>w2WBChYa0Qgv_J8zAb3U*xq> zPb0#B@d*=JrKZHkY{P5aVn=PTgn>|Lht~M^lTLYBG-4}w)K!)EJ~@$kFL>>oF{)@)}?Z{{`94Tb7HqMOmxd08$H0)P<& 
zb~O(h6$)sjlA(0JzHDuKC?r}i*>kx2@w?YHVTOJ|h$Mp9Lo&P5<-GqTgL^{)q=*n8~j?Fu(Ce7t$CT;upw3Kc*G;Ny4ypH`2gUPzQGoov6 z`ZNW8f{`sL(y9)othWe>+`Fm=}AdmFqMv2V&;u&n|33F-*8SIEc+*7!oMrAQLZEJLtr9i^{dv3 za;Rc_ycPLpthwzonT06%Ik;~y!)yNh%I_&a_;6SL6ly1OW#TLLJm5S8m1ol`AgX0H zWmT3CheO`AXB4h1P}WUqlj=a>6K5*oa#_T-S%Q+y*h&@}pCSMLx=vp8^C9`Pa@Wrl zlxy}1-A{urNFAlh9hXB(YNB!(Yg;vwJ(O=Af>R8(vY#j=k%;0Y#1x~(ZAe-*1GCP4 z8_7R=M9DTU*X#L0-px#;(jSmfuFx;zd?5>pQ`O$Rs0hTG`0hdt)l?{T!TqshyAg9D zfgkl_Ct0K<8OV)zn*Ju%gt2ezSy0>^nbP76HKx2}Xar^T+Y#tKC6PWjkzkcKGUS+V+S_l9!x13K<&LOh_J zHparYG{_8t0UM@;K}&cTUs2_zmKu-b2cZVtNp25Q!3T}JwE45hF>cRDLr%$FQVM@U zy!A2hS9CKq;^&ic{L#jj8@M`Jr6c+E_>_36vMr5%(p1p%brjfAB`1j@U8|FRjI3!F zw(OJyEN@%@>5mwp4@>ky6@>=!QWkLuJ3P@Be6Stw0U=o z6(?GAc?v(wA}MDid`@xIddO?-Q`#_i1fs&cI=#2riK@fcjRj>1$?95J_xBZDNsB8Z z;m}R(l7nAtk;1#@?L>}gQM;@ZAieBMG5VJx{&=FQhe$rz$Xvl;3Fu}=v)mo%mvg49 z2jWW~3n!a^=N$Lmt%T6<&k|DO5{Y6Vxr>y|geC!&biYFv{%eZ%u34yJ=+B!6`IH}e zp}NSL9cFWOqbO1XBx_^RivhfJ84Gp9UxNh$>C|qTF#9(u9+J_a9{u)zd|?v-sig$r z&MDlqSMB;BJ1FecKwa^I3Lo(l8Ts!3$hq(B0Zc^%oYgCQ*)pvRK0>}WI4Ux`pF=2_ z=JTlvl$*^XNR`bCktz0|77Kw+4OdFG9(Nmg8-z56T-~se1R4qKFkQ{B+&Ignrr9fp zQe6LLfg_$vVW`*2j*iQWC{>v` z@xF4#Tr{c@T+{qI(lPOQ$^9VFnvnO4Ey@1St4_VL{jwlnKPi1KR%k!T=Pl`qW6F}j zv~sEASXFW6-}>=;`WU_}hKBy%vfP4HhK)lZaK$tM7FwJJgH`hh8<(ChbEgasrY~Oa z)3R_MC6El(b&AyYS?BxPbXYvhT8VS5LrCsZ#}G;J(8tdKL*RM!?J2pPgjkM&>&~@g z66o8$KWnE92xqE&dFUQV|G*Wc3pF2=T+sH;^W<{Wtm%FgdNGBw0GK#*UGrlC!3tG` zjH~7GKJ=7XvHtxNN5$BW!<#07mDbpm06kWhwiUA0 z+50Cg{Hh+X%m=vMa=2Yn<*|2nq|{2gIU21(+Up*D*5F62`zZiFa}bGk{swhjV}a#i0(ZJl0D4XgglE+Q?NejUsS7(I`fS+=A#Zf z`-fIiEc9tQQ1uXV(2KK!QOF)feMDmtE*av|!X&RmWm^+}ne>B<615)K4m*42lm=!< zDxl0)nRC`GAQ+3QE5am%{G`GIES6o&_a@JGTKQzpxm}^_bTa$GV)RR}Q!|*|&7kD= zrEkcL@-E}lIl$CSIk)xDMaC56hiicY_#{14EDn=sN?#TH8DAys?w zKi92>U}3in2(K*{?XFFU5jcFA0N|9L+tZ0x8%54pT;_+KC z6b--~@3tuo5wdEv@Z(3Ot#M+jwY0L;L!=lB%vzE!=U>xY!RnEp_OBp#s@FcS?a+uU z1K@cswPwLW`%j~usg<~6oRSh1hf>u})pvLoQOr#>_Qg$doAu%gX6&!|B8PAYfPa#= zgg$vUbErK>LN_y`V1oBt4SG>q9{8=XTyMp6#e=sV@m8C?-wBz}m|_N--{|wCgK#$T z3w6@r+TKYV1n4u%tBY-XBmaP+F9Ox;6bun0s^}@UZWDF>GjLt(lrh*L1Py?QdV9?byk5nEXo#ID}n&3&q`~XCP?IqJr zK;c5Hw$jC-nu+OZU-W(yvY2Z;TF3Gt_q`}j2`JByUy2Z(1R(@zO7#C&ZG3l)b1BW~q!A?|Z0tT*PlJRi`S&W39fJqDfJ}8vGDoe@UNM z(uK%Wj9zRZciTng>5grNQ2rHn3{1A((XKsZ6r!t!_;AWWZ$J}&OiR`!9ku%wofeIt z#$R0M9PlXx+TBBVLE_NH={W(h#)}Vgfq28=mgENzk^h-N@BMqnIA27D4`s&#(bezQ z=w`G`?VLfOcPB>Os!l=U`Erp{iP*BPX+NwC$?^`YQWEJi#=AcjKwS=4QQS>!^IMOn zW|s37?i%xtrU%Xb<(e^ zfH*vSX<%4kTF)9u3?D?>!zxFQ) zuq1Uii7-!hlz@&%+%BW^R5gyX!U1#^5|qGZ7JDR}^pTHt^3CDURl0FcoTZpO0Q=dW+nH`ggHYI(iP zxv)V4%Lueiw=HEe@B_YBQPK1laBI-BIiP3+-24H}w>ZOU96R93UOR~ItLi9jWYr}v zl96l*5GR8Q2nx>76OReJ-7EoO?b7PJ&RL{yAt8f`OLul>E%zPyZLrOc0td7mHRBbO zEc&wlI3iUzX$Yu|$DeUhdgmr_L^c_c#H!*^9K}UNp0|Y#PNqI~ie{rY7z%5uOqz3j zt&4X0^gP`0PYw05oW}h1f5dKM~-8iBhfnWJ2qVbdl z{g4940kteacYBTSG}tSsts#vxWjS6L5ElV=epyZGgaNGvXyfH|_*~f&i0wn&VL>2-h|)<$W^~mwEf?h8+QGBic?WxfW+ueTlT5K!lx!O%!!4c8J-o zX3K1Ym26!#ihfHs6u|MLmM3AhbHj`Hj$AtmM^!4`@yW77&S)2AE z+_Rdbeyixoo}r^Tk|7d=vcqmLdO=E%xeY4)?>>rqCm_BCX9s&b)!f`!_VkljSZGM_ zc+I{nEA5%SdX+FrQ3!A1_5Or>`0e77IW+YDvXs+`P{Os*F0kkn3QOBH4_P3f69Enl zzDHi)9AeT~k(&jvShb@%3cy0QdZSeYCa?2>%!)Uw{sD0OHRVNLL*{o5%)x=tIVIZn z82&K5eGb|2G($^kg&Q-t3~kMpnyae^Gtb5(P#kTTVT`&z@kXLji+!rW?TdsMPLt!Q zH`fhYm!=6Apf=pKbrt8;pFDRJ2H6>b`lqYi=-$_FCkA9C%Z)9VYOXS9D#o=VSQMtO zGu#*TTQ74ra@t{v-mKzRw#3kQ!v|*m5C>sBQY8~Vh#vG`+!ov4Qo(nmV5(S1V= zb__Ydu@iGbCA4<_fk;-lZ^lJwy)FO$<2d`Iev}K!D`v4dP)5}HE24jdEs;6rP7Zdd zDK%HuIP-VVu%|??<8t=6pxI8)8y5 zwI+wfFoTA@I@!w&b0{N+BcqDpZPQDb@>e_Sz;6wBa!No$Oc} zXjM0gs)=bnUj3-iwzh2r5)5fwU4gJM;>TU)%JD02;WhLDL#TYJUp_C1%$J 
z9DJYO>(42vP`e#ZpM*GZ6CnVc45`*|mE8=eogYl=^Vp}0)h=Ii0NgFaV+6A_a}X>!5w%|4JHEz#n?Eh)%j5skY#Z- z%i0g7VEIlZl%Ac`QKz=!XW^@5*m3u-2@+#G=mA9AA(KnJ^mwTr!dyC zJM-0+^p=e2R`eL}1xEZWpLbnd<|1Vj<%`;syn5XIRP6^~F!)4lA!TFE!_U6U;MCs2 z74L&amA~!Puk68ne$^43|IVE~t;%p<9i1`q6oz(HJ9C4`4MqM+ir*J{*Wj&sKgC(_ z(8Kav$thm%&aXJ?95s)}GTp_#;ugJP()-o5ENH3eNQ6_pHg*M?(nIu%(asZKP|iY( zn^lq|% z!En1G2S6h#tTsST-d>cQIrlVP9liI6!PEhf!9t@zS6_LA5Td53Ys#3qwvJJqo5ezbR#+tI3?bzL?|4rJ zZP&cnu*hM}inUQlaL?e*(!MY%2HJw%sR=eJxTRz>e=o#7#L*uTE zDy9}mHDZ7%Kn*?k2se5_VT}5qIFpp+ZWyfb(%3{noO%#{^fvg!)GeYd5WF&!qrQv&z+(8S&PZ~DIWHY<<*%L(qDCJ@x%FU8TQ!6_d>~;;L7r@(A{}K#Ck3 za^D{ViK#!jOllK<-rUx-i5k$B48b_9Y23Spx}iT@SToY1Kk>~w8y9Z`&^}2_(SEK? zvIXI6)g8KX7XQ;dcTINn&MiZ=Z%-@Hx<3L?V3n<|*Z^`a$b@kKtc@tSH|*tH@JMXu zWzUrL-c!~z7d!)^|I7t&F?jBHI{sgB(mL`pt$iiC8^$!}UYjDpn5E%m4;i5G?GJ%c zMqg^C-Xf>b(@H#Jz%bj)aTTQ&aAmri9ee$8Lw2*<8^P z6a1?c9bdqj)cB(jZ7*!tDoz5TPIxcW^#`3sIP?~jxrZa4If0jgv^NQS9j&vhb+Nf= zKJr!wz#tOTu=OIBK`dPGlyQ)C=$!T{Ch)+S95lzPyqnybNr==ru^o;Eb0H|5Y4#P( zneY2|?~%~pSKvZRStu*|>`FqGW^kRkBnLC8GX`$1d_HeBxU)|38I`h~#EF~|dmEnN z(7$s}=>6JeYBO+inPe@K9<4Px+!$XgEj=-IuawhCo;TnQd;w4e6K%aOWH5K0W+h zT0%E=sVOM@TI%7FLQ^is&Rz&~hw~zhs~w09UoxnJaJ+2F`3ig@)e`Er-PYJ$^Yill(qh>|^4Hj#9_etDjpQ$*&r6CJ(@!*DKh1{#6_OK#9GKYR`sQ|HrT&B! zt|=5*_d9WA!jZl|tgp2Xw`I;fR4x_Tcbuv~$XzVOTPo^DUX*(tv!>wla6?I%-G`uf z_6D1j4>ZocTF6+PPXrxC+kF#{QuqDM%+yJUt40u(s${HY6poWcCwhCC?|XTTy$kYB zS#BvRF@w!%7s5R9x5-*68W{^YBjL0W>_q>1xrjv~V>F?8)EBG4mq=nl^@w-(pgV%S zPUe`H#Or2B`qlTLj-dPv9eaD$h@r}SLrdU9INby$y~!k)L>AoWQx~rN&pqm_R1uOG z;KzTIHzV${M_|zLZHA<+N{06p+1b`E5cG(31g=Gu>=T5H!H81ZLq~i&9lj;$kGa%W zE>?uR%|(QaM;pyTnX$uovb{;ELJNd2_ABP8ZFqnav;lnL6lreJnEq+8&Ht$B~uS>xQm<=Y`EJqtjK2R?&J5Dy7!B_H7Rf7L#H@p^wh%diG|s6vyfTA}c*) z-lRZ*gOZnx@cZn$l(R{%vI^(>)WGg->@d~Fa8IZ@B?&_cEtA3`;Ct?eGup@a$0!R2 zDvy;x-(a!_1X=In2RyVS^c^=82wRGyGq-J_Fz?eMuO(y_5Ry@0Yz5aHKiSPr_V4HH zcn;c1AGK<`Xf4aRc?=YZE1t2acmxVuwe!Q#^-_f(a|aOKVoAEtSOI-3;zKE%5kr3S;n$H@ziv;| zlnha@;)^il`XWxC3A;m`zzYtn-V;1tb0%F5m4Ort%M*M+63~il3RW%jJNwLXRI#I@ zzR@{3LM>`Qn-$p8j>rkDhlAtvW6J{cJ?u;;51tKiBmYBx*m(HKuOr{%Q*x5dtnhc+ zycR5Mo-?`_#Ls}vC3I?Rck)0k1h3x4zXn^Z-n1WM^{5}vx);pokip5x+ zh$_*RzqsKch57F%2I2aOHJayu^l@*kUAp0y4-fqVZWo90&o&TuvO@f>EEs2s0)ycVv#1x!seX}QaJl#1PtKh z7kIh(J=Tq;!)lGv!!%M7Aq=B;K890~bmK+Pd~d3QdBUKYx>o5y$9q*qHsz_`Dy~<4 zjk1a*{XHoxP!dfvhbK|~*U}@Gkg7BwGu9hKGc$DWgTcj!>2N_rk#+xcf8Vgj+pB^2 zK%frrktMaEP~_dKdMx-&J^DiTn(|PsHQK~*ZW3*WIWymn4j(!FBt#Kve&KrQ@v7s@ zhe)%3ndVWD#fV++%EIX}#2Po0v_^Z(m=Ob8VnFEb z^tG-RVVO4&H$PS%qUcrE_fLoS?iz!?Wn~dQ+y(8rKcNfYI(*!w~PbZTi!));imVuO!&IBp_|mICB>EPO#soBH_#2q7KAt>D%@y4hoV$nLiNvp(C-x6;`UL?#maw&=SxWwze z0f6$3Xgomc;{-5>L*u)SFt_sx&z7az0E-0Ds$v)4!|KAl3Oy(3qNtwB$z5;&>ZD8~ zi?vpge%j@-{>_vA`@(dg%Ts^nKACz5n^x#Wx}#YUc=F){49v&)p$b_}4_iJhSIR)~ z|27Fe8+pw%K0#$thbPe~9`NSQ`dfKW z{y7$qpk2_zMyt@Y=*UI?WsE=nblUIp8#KJqG(4pY2Fk}|-Xvx@_FQHw&-BoO6<7Ig zQ_~1dOMv*c`%yC+JiqP7=a05j)Tp1~efTr6z%oo^efK}Rvcat5iLS=G0NhzcTaLuf zqKr+pI?Fk|-=LBOux}x~? 
z^{+a!F&NafC|0FRR{aQE@jE@zy6ela;EUSGe)5=YJo{i>622lrSSE9kARE1TXoCzluzX|M2=w}z9!`9pd$$?x$BM}1j!>cU zy||Qa(O?GW&{Az918O3>nHcqEh42z$-&Lyym^yPgFk6@SswQ%TA1iNc7OFm3>$NQd z+eyR;O$>m3mudKhc2>?BsYJ2#4jjy7k^O43qwHUxWEENN?j7x=lKa5p9CPPG`0ldJ za{VQ(<=QO#J+^0lKqvR(>U&a##Wq87Ke1 zCmVF$@JCva4ef)XEJX`h8tZ4pOs1`VusVae|G!5-1{E`7<-n*0xCz#6L`rGb_R(Be zuwP)5DAx@G^p3UC@<+qRf(~QyG7c6E&dcYuXYsVf{Sl-Z<)HxmtTcNB?j$&k3~_WJ ziXmW)nJ5{-dYBfTn%u_PlOfTJFIs2PW)jtQq|zQdju_dtpTA7uo!KJJ@tIRGKbWU< zxaG|PIuKFA4}*iNSIOL3oXW}1Wi)3F_T_{e?5na-D!!O=W}hlHroscO+B*VMfyFwW zpg&WJOb?uMD(}I+yAoI{nK>EU=p7BbwG$}{Kk4hxS>8!A{V=T2S%jNB(L=tR5>BBW>P@PvA*@Fu%*Kx|2E(i;mR(23naaAxLNg7qu<Doh$!yST-VGXj>oEhD10l9>b&#C zZN~Tn<4ionsbP{OEg^{$r zZbC)DV8d#_-{qt^+IJJysJ%Yg z!7VC%*M^$$CkD}B9tptu(cU`CbD~~Nu6+g{7Vz2~P!3qs9@zh`^(DCum(UnyGx||q zX;>C5$XzO4g}z@ifAP}KX^w-iZV(jJUEgI7DR?4hIZ-nrbb(8ybvAnBiB&5EfO29q zCPz=104*uf4utX_EEjA`Y0d@o;mQqg-7_yAI3!sw+LsZ%dXpmNONTrW;~t>2128fZ z!Y>R*XOuFe!VG$YtDqm64lT|6$ z6i>orRb@F_$b1d`UsN=_i~VglCJl1@-;+DJSo=B^yti4L$(gl=Jj;`ath+8Ni|npB zHe!&Iz}tz;j4t#Q-bk81RU15!f789zW^Axb_H*<1QRcZs%zBBsvpS}&EpS%VF@d2_ z0nfkvy(P+{h?KH^L5E;Qu{FRk>=nwA+A2l)yN-gk2MHl;0`g#()xpo&;5U7=1JO?U zQMqB|(jX6fRhR0eM)@X$VtmEH{+ut+odqEmMbW=4P=*hhdIC6#liD>TSbsa#4r1^lp;eA-sw|c=U!5xwB*LpLNUoHeF zZg4}Ik#2Nar1`}s7;yJhvZ-|6p)4Lnm}fA}jk4*Ahm`S0H!KXC*Y6BXDI4xHd1-^| z3ydNKUqNj&{<&W$8430Zdvwx1u9G2MC_O)|WQO~!l@hiQ&5E2c zPUThK*1el1587y2orbacMJr;8YYA@C*^{*X>txgiS7W_u8Vl|h7&eGEXm7{7i3zT$ zH2j^{%g)KOOwRSDcyQX&@M)J4n+^kO|Dna9H?>{rWLbi8t*7I`3Vk|kVfJZp1qsIU zEYQ;zCKmK!#Br15CrxyBOHhYp?;vGdavPRB<+)CiKeO z|BW8xygT{T3aqqOU5}6#!!dVSgKpN9tF&_lVqI|emjY672I1xD+Zk5HaR&{r4EKQ( zlQS{Qnf5Ee>x7 zrm~vr>&6~__bs`Z*tN;3XUVgdlpc(3PQx5Y(!iG@rq1{42t;ogqI+p#7{Yhny#Y~y zg4X)F;)bNgjUez0E!XnI9Mf-BZzAiy938M&DxU6S)Z%fW42WlT#z|R38Emo&+FN@8 ze_vjX`t((l_y^k_cM?V*IXIGd+loeIg~LKMru*Frh$Rg~jJXJ~Nl5%S{uL~mZ>0;! 
z?jXhyjb&!~!ElD$Zh!(C1(`8EVz)*nIuExz2x>p-IsfC6U;k{M@z=0e)7@`HO~524 z)rDMG+uoi9@WUVjhn7l~M~M9Rhm$F2=;pbi!behLjNFNo+;%-<<=mByi{YZ{2y1!} zS5^0uI#5)UCl&g~?bHB#XNWqoP!H$Nl=`T;at6CX?lIP?@a$Y%?1O{rX_!hr;!tmM zDMzW;H+8L2_Cq)@G*`*ej?HjmZtXUqv;x41J8!^I4;1b2IID9xH7YUBKb=m16xM1Z8!~g1ntz9@aE)hpfiEW2n!lA z-PS+LsJdI)aO4^uc~UMtf~Iq;Xv3hs0`sMDqG-D)OgNmkIYrU*-LB38w-$VbzZUa-yA zGjt?E(LP}CAvz--e2~jZOkj9X@Ffz|o{iAL@(N^%1|$H3eQirmwOx~GIA}|wSuJH# z4NRCs_msY43qv=I-&J=D(tRJz+~8$$>Wy8~beb)uMRC=-GjcUY(bci`)Sh1)Js1qH zXnt4xg7h>ja!UQ_+A34k%b9D#&&&mTlUQMp0&FS{8^YG$ zX2xjN;0&MNV#keR1XTz=WY1c{VF9TRSPA}Fi)`@m;`$E3UzBKCMSzEHMmDdd$T@Gq zJRxh-Ib8mQ)~D-~`+HZ=^IM)e$cHc6)w>ic)K=dD45aYl8f7=*1)P3z%@NQXn8J}edj#av#7$jEnj=_}USm|i0bprt*RWyz zxgnbsIcK8;ZR%1_)N?rweK&eOiOw_bo?*~q#Syai9j*#BQrt1v{>Irk&;-0A=W$7~ zH!iIfLi>%U?dwe1QD~CbG3{o8RSA$5>3NgrVTRR7p;RX4rB9svN`DtAq8>B$<@0MYl*_WU>_ znMOO@2FdEa!gwtFhI&&}_;{Z`o{N+O^ZDNF{2&of<8VPAD@4KD3wA!wq5SrxZAwK) zryD)BdkX<^F+%+a$lFm&%c`B6l@HX=^8oSK99irkbOh`{1=_-h{uq zRP+33ZLnz7Xz^C8Z@MYC(VZRIn(}TYI0Q-{5|bSljMyhhS9@Rbqz)BpaBKE>6Fa=LW(BGemi0S5eu_G8je$Mlgyg-IjbKwh;@KTlep^JAZEzoS|v~x&}@1;G)Su&O-9;wyFdx6IedaW+Y<9CW{0|6|;~}(m_f%NhAi< zjT!2+i57;pT!!mfK`~!#C|A3Lf+vV=1fwHtcFHjLz4x4=$kPyyqry$}x|zS>m5^68 zO?w6@o}h+;y07rx`uuJlddNSY&C2N9^s3W2SvfPlOSr!gMdKE3Cpiu0$@~;`5#o?N zqRw@zFZbKw&?bD`zQKA(jFzyc8)Kkj0G-`5<3yOr#<1MtO~3f(J);5EWqz6pC*|0AZCos&QDlmshPzB>QcVO9i{u zz9IqVLWo(_Lh6O0)5wlz1V*gaEq0HqPt>zG@GJ4ecB~H%16Mv+g^R>hkgx{^!?xTa zP_42C{2K~-?J6i>`ehKv`nSy3c&G~SV6hH4u4Cvqj#}wgBN%p6VVQ)f@|o0A9!_?# z$Pbu2oOArOPs8rCiY_JQs#K*1%3Bt_V_l>#Kx%_N2b5m@7GaMf#c(AyVKy0Q2P8H9 zG4=WaQ<8KG6PrUjBzZ?UR*~w+Z`c?Bz53s8CN@x!Il^2~E{Z`3K#6bb;K&36_A(Xf zhChg*dD3II1O2<84PlBW^PEC#TQd_P>g3{-x}1f7`I0Q4S9G`5E|H%8CCf^DbT+)f zwYW|bJE43FSsg-B=09Ch^O73P7SL_rb@j?X-;r2FPCf((HRHOez4~)Ni0Bd0ZdbFA zZtJEw@pf197iXFk#sO!G9Bn++9cOo;Fx`?-jL=?a`O zE*groF&dNj)4QLVANKzGs;rYD5!+i!H~JZ#q{>2H=|`ZvU1vI|u^R z)unHI=6t|8{n^pYubXWKC8qt2$)mw{4^Fu*UoRIU=?cZ0C3rKa2XUMThRs#j4K48n zHdP9INpv^tYHQ3P#h@y!JVzSWzcfK+T$>bt%9D!6y(881U^F|h6n_-J-Y+o0U;9yoCnR}-D812bY z1XPVS>z)e*NzEqwq=wAP=QlO$N}NK*lNjlXH_S9{I3h;?h~`K!zGCyHo_}E>y$~=u zY{YOT_e9{!xxw7B6t8bMN8O1Pq|V8`hGK*Xr9a*0Vj{N0YZ7}u`uHQ(FP|u${_Tpp zD^}LIlNd3MOo1`3h{@&(eS4*TgpXbE5CC|vC2s$By%IDLl9MHb1Yxm1W9O<3pIi==uN&N9_1%H(E1y_OE9>^ZviE8`*fh@ylCZY z(VmU5&-!B@47TXcliT)tzmYMkt+`swf08zB+{3Ni9v3O>@BxD-qPr84{yRlRJR~yN zPi@b|sLT_#?bkO9OqIV1MLyB@m^xJ&P5f`lZ>3QE6r0(UJ^78Rp+M%-%IzCMB49XY z6xyRd)0%rCcM6&%K7`L}DiE80jO7MLKfIwbQpgoZsE%?fXyl)WC0Wp>GAp495}{t9 zTL&M3(*W;!75%evL?t{&n_v@Mi}X2l4vq6+=e39src4#Zy{~(Cac@NogK<_OZ`n}^ zERISiBKl{PdreG959ukd3Hrw*o|8$|4^{M%C<}9ob)y?r%fE=)oBD=i+^9Qqb7h4& zm({OLOKf2N@Dn_`f;Is0>SVSh-P|T&`nz`CgJ@h1ux-1OR5h!{JrUMoF77IkBV%_b zMTGdU;{cK=QYQz%PbWi}Z({>(yGQY0N_`NL8TNz1jMsn-yrw2h)-(ElDjk!2Zp@9Z zBa*LOvlC_`WDQ2)+^dR77#mG>Pobh~y`VPT+kP>%O(E4cBBdzC1KIFkQ*yJ_DCh|N z*FGngC9Xlku~_lHRk8|U%6`$2#2fu#>bnX@?kZ`cOpMY>$c4(d3Esf$C=1ohCXu>7q*;yG#@TE9EckHydLp~1+xUF z+)h)@K9CgjmQN6Mi1cw{A>_}cbJ%;(L4#IQw%p(oJAfPNb@)K9e%|i`oa0hG{jsc< zu5s3!rBluN$qkq7n-u0hrX~PU#(8NV4j4GWDBDo~ENpg4%{xEqy6A*JTCQ^U3>8*- zbv~H#ix4j7Z*t3NTFihfYz&APp+s`bHUB3cKv3KQ877hE~q6IMw)!SkQ` zZU_l2#eT*|ncIPedd(pLc0G&=7RgKvZ+-YT(Ji7!GnR0Nm!9HWDfAgrFjMS^*3;eB zI@^lZh_#{Gz+MQTct%V&)c9v>f2!UHvjD=uMB+DPTLC-i< zwS1R=BtT$Q{*e51F@Fk@|La<6q(6ISM(%`A!!6w>p%_p<3sGV;1A-3N20UC9eeu#vN4Q$gcAcnj2ik zG>kCRr*EcxMWX0EY+?ieR*iAnV=vlMpzbHwVo{}r`$N*L>YaNsNYkN@JBo6#Jp{lm zp1p8Jd=5I8d$=5iZol6qwS3_M(C^ve#qOyn(H!atR72E|2x5DMW1dGe{dpoi&Nwv` z*uyqfMIi)5t|VYfDu<7zdis;@2ENkw(=?`7T8@d5Vx6D?=)9PNPwk(r2p2?C;ovQ? 
zdaRGhZHaczw6;XmGgu+-0>)gwAixb79*SmALd|@RLkqpA2hv>qlZF;)CkRSnJ>KSc ztGkmZ&VROW@fe?s4uLhDzmGkJTAUUh47vr80>$~4R!sjZeGGdc)P!NwsROZyoF3ek z7W?nepPS>Pcd&Sewn}(GtOnPR)SuYdXQ5D^vb=DJ;H-FFRLVNK^a8fQ#cp-rzk_SH z4R_$7{C{wSWLN7pVG|4}i7Ft1_bGzkDQf5jM1*E=tyit&0pS*rBDrvtUT0 zKjNg(V60X)7qk)b^y-C9jP}Izw43m_*zd6Y;iRpxBgA@vT+jH;G7H zdcUdUv|*$P+^RM7H~5|F{G!P`fbJvZqu)RTWRNS{Av!vMO*CjczV#I7WTwpz00_aH zTa+j>87dJTuRo`P_-vi*uici$_r9!OY_%)c?#QQN5c+2NU9lqJ58%U0>hc&%HK;Mt zs~p^`u={OVMxrjC+W;P+kRTjoOrdn=DRvGweDi1SMtX7TwWr$%SY>g@#W14Igf_n} zm#R>7x3C%w`il>tqAjbZyVPwWSGIOA&>LCFPU8ZlupV{!@)lNI{i9$r;WXD!s-3mp)zd%d)PJE zq~=kSO|;U|=^X!eDq=Xq9J_qY7SLiek)6f$r!F+7VP9EdwLF^v7f6FedW*oF8b*n8U<0`~5jTYfk!C;2n7Ujcs=$&rV`rnTvc)(d zt-ro*hIA<^adrd}jHTk3+xpLa;*}C)V{;oiF7-v3xu8W}d#`e}ayjD=DQQ>5S_ZMq zc63SB*Dj=;7L`EEvE`(Sic_3|z=|e$evWL!u&5oj@B7B_wQ22j1uzbCc(U*Vk|3WK zE`5-CSg+zeMsxz6jxr6PDUJ~aTIPRu(#`8tYV!{<{1ubziR8KP*4#6+ct7BI63xoU zvj+3i076KWO%^`HFmL{Q9dD?}xnrOi+|=#C<${;f^A3pM7`YhQ7hOW;@KENrlf zAW;^h3aUwk(m>$uX7=QvJT^mACvaH?u9=X-cqXegp-Tw+Sf2wl`vy69WYoM@Oo5ke zb*nqi2bps^eVa2f1)=pD)nt=XdgV-!HG>qOAE9w*!GAat`vS+zAU7cj3E& znRqO!220C$VIO>i7)&ZAy{-O{3eW!Dg>wNIi|bu8T??~c) z&GvEPPJ`vDqpC3UH8SI7pxnQ!E0fBY(mG1M5J~S*vP>>}fu?0>ms2Vjax#8Jl}p!K zU671kVkW}5$aS$*7bU?p)npMCc+{rRcL)e9+7b`22U%D{{^^FR#}A5O-}lR@FsrFY z0me2RE;^aM#13Y+oXDknA3>dH`I$}{5;^#7iHlI+7?8J%#%AOqFe3T{=$2A>q>UBr zrg?Z600Pw0Ell8@q-Ttt8TB)>b8M#dqM`Hl$A07vV`i^4(u>Q6*-SZ^rjFf)?McY= zcEYO?)CvXr=#;pR$l7EPOS+}+V5VZ!0A>xAh%Kff{MA}!jahJ!wV=~yYP40B(m7BF zDI!H+3$`+3w!uh`60q1#tNO3Bn*uf~wVs2L#wK5k7)Vr30CCj%IG+~0dFyfS$JY2! zxqMZ-(CVifaeG|>in$pR9Rb~;23l8!G$0VR{yMOoOu*$w$pDE@&9nm5gv<7F^rDxy z4p(xNB;dvH#^nFUv)+;hc8S~ z;f~z)+VC}ml5QH>R6u-=qY9sSVKwY90l+i5;}$f&0T?#X@`dEoq*w}-Ke>33W`$FWC3)B^genqEXKdlYdZFN}4@O#**&9`1p@(7>-gJE;eb__SO&7;BiWWg5f82Puq`@;IM#a%KjzB%VM)#XgO?mod-S-A1db50yeFOC2{7!a z{`d*Ji^Z5UtdKX1#53+i(I@sZEUh9TX~cNtsmzaCNy!Kzu~Ir%^g3)HaLME0Rafib z*O~^1BC#ly@b$*bS9j<~EXK6RDkW;%?)hDKwO+|&3HGz#{mYDgN2?;=1MQ2qvrUV15vDj!z7-Qtg9FCa5l~zLf`AV{R6wr?;5Usayz)gPykD>-4w-ZMWToCF74P#5F`VLYxyN# zE8hn`X5maBoz;rCURbICF8UxSJYETZI5q9NrV#OlZ6uw5;mk)++n!YgTXh>71Hzhv z_^Id~&RXV{HV`9&>X=Lcc))L4&bp5!lXDV(Hno<{yt&wf1jq#`Yu)8}Z#H5E_;E6_ z#qN^7(?Gq`Ml`9JuunW4Ul68Xe>$fEfrE4MA1Z_2luRuadZzd_6}XUxLx%vE*2nL8Ln+HlFTGwedFnbe-^v|9Fl6qO95Qthwp7K zu;A!KJOQw^tT&=%U*NFoKJv!(as7T={GzjYLjeiyYoZ}?4uUsp z(P#etzw`SqN#k|Y9>EK`DfrB6S%x|*-WlmWxabnn7SM7dT)-9TcZ zNoMgaKoebl<~0J2XsCE)Mg~J_;pj6z&gidhs#?S%@kwYgQ?t~T@t%N*m-C*_lhPMD zfq7*Em#bbmf-GYd8?)%9*P6E=`^Ev%8TDJmuFKN$fKM|5oe#SOWrz`3MfyPY;Cps1 z?q=9DhyXees?w^nk0wpWRO$6Gkhg9;)qR!1hsi>hEJA6Z>B5pxlWYk%cL1 zKE-mYmijcGge?6Gm=+`ckiD)1mh^iaLroe9}!%ZVzfdBmh>-yg%ahn2LB z`0z)=+q%hP8e@J)Ppt0@v@7CTFh!0v3A7G<|622rjpTT>Dmq#oxe~Q8LOBpE%vp9nB}dAucigNMp%`BMFACkQOQA-=%-Q`ov@Pp)d5+)DI8q^;8^JUw z3S@+fWdF&%K|QLvSUT~OC0JG9G_HZ^_Z=&n677m@rQGXJLVSCto2T=0oVbB)7zQ+m zn2^t9^i$cmGB?KcYGyd#Q;N8z#LYCh?S#;)ws18+&)|58;2-=dQT7KCFEhYw^{x~j z4kt8{Ouz3Ra}*sh9b)>363ntLiNK)5T%ywV}S1|&CpCL^nTd8mJ4 zUGhw~3?pr*W%wrwrpKhByBRT(!6ZWUKW*@5ZFsqEHjKcG={}c0H7_kKc9Y05&UfQ> zFqpt2mtpT3`JjcF5+KBP2gWB$D&Bv09H>S5=i+A~e-oVBoUe4$GwpsgLu34=zAvP< z6C0#;M_4^Vqq(VimWKY>qDPrHeXv2gQj!v`S)4l?E)!Ug$EB2BS~hR+1bR`MLCaK+QM zqT$t*r<0fvjx(%0@U#C$>qR0je3E|;nt2MunGOU^Al6c z4mTEex2De+q#5BEJ+l2S{$v9$_CtS~U6L&IzMAZtK3s2AI=6z$hn+58_j_PqZqq@} zr8$9MGO-!l4@Bjh1rP3uJ}txp{rdXmu5=IqV{fQ*Sh`@t@b4+p;n1S7m&0W%(W3Lp zv3j>p?9&xsl^uS%5Y+6sWyJx~u=vBbQ2H9mFF|zv+=IZzlYPun9iYJ$+}4hx7f#C$ z{UVVGVVanu&uShZe6-e-dKpD*4fxb@GO9NbIIX)E*`+}r5_A}p7S)8uoGL(!*V(1| zoR5Lo7dN5>kMwNnjX?si->JcTw$R6v`he_^?&Cd;zCmQuBxQO zd^?(N2d&yX6a_Cp4GXa?%YO^_zT7%px7c&W(7#h>Y0rfF)4OWfOOT&oC{r+o0F3Zp 
z%>(o{x(tG9G$E+uRac&awt4+5{izk{RfFyjwAt&|M$mud8@N?q!b>->#eIG3i5ia0 zx;NOOMw&@e(Ay~DA zZcV7BN(C`=35NE}QUKljtPJj?17r}Gl#t{)(JaXGi>|5kJ^Qz<_MAg z4-Pk!5{Bzk(&;{E_s)R~k4u4_Q{)9d`(uPH{m(2p+i=_pE(HSZlH#{rmf;N3X!;?_ z_dRlMx%L5hG>UbahiPai&Y!_Zo|~uoZDIAYoL* z73IY))6Y_ttY<>g&m{fze$s}r!q*b-)JXP=(I6_N-;BE86<_CYM>mCdEB3E`mCEH`ktmVkuH0?ku(~YQE@=aONv-^kU|+t{ z1oYrfuAp!B!p}Vfs}rIn7`Dt-%d?`F^2|_Gb-(>;j{#sA^-Cc}t-*^ZFlG5?F#&Yc zgor5cHKrQhVia$=DKT#qsrQ%&Jc@E9X(Q!r+E4?g7-vupn#)>4M}(TSR+~0$@PFs+ z&O~$I`)l=hZ)ZlZ)XYgkW-xa-0OBPz7+aqZpk)5+G_@2#XbOyoj189W^(yK`+sMt4 z+CJtB9LX!c$P*k-cloU3C?ZKgmAih3V9aaB4=UQ(Sap}DrhbbtDbvWy$8pIM?T~L` zvK?+@YujFRQfFFX!#shYpu9B5o}zHBRX4&05Jv9|k%oc0-&H)&Mkjy@dI0AY~bP!K?F zt1Hqg&iNiWMzOI;)cWY~>2uCsSIbw3-4JLTiADx5MwU|7in`lJh@g|nU>tGsuoM&e zpp{RJJ>P4~ox~lFHurMG;d7@vXxvgi$mNe=!ZKoYfAH8t7x)<`J*L&eQaG|je9#*V zD|MMI9{KSZmwvdrUl0|!x>Sme!^$iLmCm-yqPL3MFbuVzl;D?56 zz6~6IAd{oXXDLIQq?-N#&%O9+-H37Jj$h_u0}gst#}DhG<31CdVAI?4T`R`*W3z9Q zWI2v3(V721gr#$58`u-2|BuiG=yDK}7I~u6@~U(;f55>%Iai4ThXX@QCIpvxC|>97 z`w)ULKb_*a%y{;OIRG3|z>iaSPPMxzmjE-<%0Ocls)b`#TNRIBB;QpU+yP<*pr6vTuTW?hm5U2=xNwc1h(JPxJWVuiY!g zLG9*jradhzfzk5g@civMBEyD!K_Xn6Wz)e7IdaN|stWvCoEg)~oZ*3=vQ#zP8n20V zQ(i*`oKx>&4Ezc`ZAeM)_$shy!40=}cx3B>m>m@t{*TUuq-8-;05wNFo84w_%ff;l ze)kwo5liFCYt$Lx67U@G6CZ3e7(2e^36y2%q}~QB5W#p<2aJ&b6`36=iWGb`MX*As zAKwI}pIa)#FF%c2T~OT~8z+n${8@I~7$aFroj?Wfkv(#nbP@%rp4kvC0dK5}V8O>s zDyz(IyB<^PiL`9B{HgJU?v?m*PiqS&Rw?o-;aB>>2nl%f8^{x-s~c zqcpChS2QtEi=)-JiVGjYx!6hC97mPbS(Kv+TuO-y_3v>Mr^;fZjWdi8K)FO3?8Qxpa)_?>oIBhnS9m?UY)<*PI|yxEzg0P+69 zON=Ga44U(eK<#$6U&skHh9=0+ro%7HENU`kgY6>TbcOllC85Qo@~$Uo%FVj|_PF~K zEzOi<*!$INcTkHTTh+3e0G+q&l1xl4T7eCCV^Dfzc+|}I3L6=kn-&%&U5@5-C-`ym z#^+A#gavB3xS_TKw(!CkV8u$2yDp!1x5^ZjOn;9toQeqC=`aQ8nXrt)*nqy8jZ4~} zkHbTIk(-xN=fA6dw<~xij2F0gN&GWk!Qu7Y8{NMJ`SERF zC>BwWFD2^Q|DwEMgYOy6{*&XCkRKsiul&`VxAz01bBz`@z%KQ1Jxk*D=)=y?D^{~~ zO5yofRkWJAV$Bc)QK!B*;WeGSj38?-mSf=pV2^FtIRStb*WP_o&1g*!w%4LeZGh&7 zbO7bp_e`hG&G;ml^Zw-H6y>|GdPYZ^4wjMwMv~(kgJ;xtGGBu9Wr<{4Hd&c;@7;WQ zyUoRU2=Xd_&|3n2o z9c>Bq$bZ`qX~VGrhBh^E?Zy-42!o@LP+>yY{gBb5!#T!D0chSZ5++LB<{qWe!dB=< zF4t63xVtDp^2q@E^@#dGtAHRfT(IObPeW*}mqsDR@d3J&WX@E?2H9LQ)%XJfXJPHq zv0T&1T`nGLGuEcE{KTfSf)*g@gFdqWVG#)6v`F9@bOHGJgwFyZ9yZWUm3|dlApkvv zTTCRK!w@^mU~>rL1N)N^$+L6Ic*La=qi{N`J0opkLLwrcb+UQ3arx0iQiXz8xf3$Y z!dn*1yy1OewQlK(yt)02udHC!kndQuNz6%z0>=FZBp;pm^&hBx+zMvdq+;UHpH!)l z^n|K-GRn2}TLY(EZ=3?#dZn_uFHSGMs~Qj}pd3%cc1A?%H9cry^ozM-$bFlo?9obJ za*(9G`cDfFLR%GD7!ur%0;bJ1YI9Ddi@Urpf)GUtf2}JnIzAE?PPDve{9y|S&wZOq zDMh!Xt#SC19T47pf_>qC@0&JD#jDRLv6=G*qU(CJv`&dE1~h`EP1+g#@A%I>#`05W zUJh@w>JK&yV_UqJvaE6#UIBf_KYXuEWfNBb8|?4CFEf(^E6XQA4C4T905`hB%r0z2Do}ot*o5W{IrFbC?@V&7*4i4kKN%~mgS4Ws z5@pR-tk<%-wS;x2ra!Pu;6lNxUreESUg3;TFCJk1kMVm|gDO!xM)}hR#+G&q6KJhu o?*7r4rAG%+zn;O8<2rZ1YUvS4q0M)7l;10RoM+oadkfA005XJ}KmY&$ literal 0 HcmV?d00001 diff --git a/media/so101/follower_zero.webp b/media/so101/follower_zero.webp new file mode 100644 index 0000000000000000000000000000000000000000..7de5037a31bf7211685f2197d6bfe0e6037e89ef GIT binary patch literal 65314 zcmV(|K+(TaNk&E*{{R42MM6+kP&gnC{{R4RzXqKFDgXum2tI8#mr0}|qN<>>IFV2d ziD_;vz~>LN%=h{GuK)91;hO*cvlI_rw0-sPD32}wx%94NyHD~AIW3s>;p_CNnR z66dZnY&0eM-&i~N$(E2=iuz%k6wf)ikvCM4w|Nn1C z-$wrz!e7t6|NYnaPxC+bzE^So_Ff5n)=#JUKl*%Y`D_0l`F~*_%737LqW<^!9r6v` zuN|$|{y+Yo_TI&w=Y5y{Px#N@FZ91OG#UG6P;c-*yMER9rTWLPH|Y;ppY}ie|G@R( z`q%r9+Yf>d=fB^-@_O-mQT^BUrwEFDd{8oo@a=Fb)6tIRH$ASo4j%|$`pB5!{jnho zHQpFLvL-lxb82buh7YWXcXw)O@NTX%rlCB#+GQVgS1mO7LkKdWsfQeH7aoa|*Nmsq zNMOEj>y4JL!d=eC&8ert7nTOXNEzaDX^v^Il0ARa(#E%eLnxV9WG6Ph;j3R4&l_MD 
[GIT binary patch data omitted]

diff --git a/media/so101/leader_middle.webp b/media/so101/leader_middle.webp
new file mode 100644
index 0000000000000000000000000000000000000000..502318b3118cff70bb6150fb9ae939d955edbe45
GIT binary patch
literal 35576

[GIT binary patch data omitted]
zyRX_^UgAwcl)Sy2Z%sfue1Yo>61c0XR z#^_KiFHDR}D|koU_xMnbaL>}aWMbKkwB{N70k}vF7)5V%eV?Zif9y)oh@Mufw>5Q5 z-q*S&G_(mBMo;mJjBW7}!{S`{e}9SVCJIrETNMVW+I~HPrsx79+QbaRE7bBWe(IOy@3WzvNoKdvbbEKLf**-Gd^2`1+b|Lr-eN=hKx@$(H#nROn0{GX|O zR*52IG7f54@A*xk5HefX#zi>B-ZG^~e<2d>JOf9PfkrixNaJ%7?=wT=`o;n~si%^xR244LasaxBC|RwrN-LOBLrU_}Ra zoG2NZP|+E}N`T1EaD7*{c&lP)9azfot_})zfH`m+`;+4$Cw=Ul?=a=BHA3K7bPFdK zEk^DbG=@fC$*C5b!XIX}%%0Gvfa&bE&B;Pv`Do{w-0)W~X+sNj5mF9fsPnrIoXm$6 z$w2i~&CXpz*$?4{i#$e6$y;p)rBKB}c_dTNQ_v}lT($%mFybB~!`5TC+c{6Q__%?T z?eRi)MwiLbFu~iK2fKLWXxehp5qCoO1Zk}_6EgEOfC5V90N{|+xe5LsGLYPa1|_bK zPMd{1z!8_1C`*>sn?@mYo2hHDPdaZss6{PyLHd^-yhW`fwM16e$c2nwFKSaz{Kk0~Lp$F>-dq9d*Oq{M7*6;06|{ z_UFjLiRX&WOFN|eB9l9()e9G6C(2hQWxnzO_YkS%yaBx>Df(;w6LgW0%dlJ1X-eO) zq;!xlYckEwAO=LXYniHs3(ED70#vTvB>4 z{+En!3M%OB@0R&oPI&;5^uAIWmk(bNI->^j7}72)|E8kFfH2SgW+vxLmc&Z@j<~Vq z%FtQI=8wKk7x&3j0rE&|c{#GBPp8}d>hI#jmUbj45=mUhVlJ8aPdP_kbb2XU^w&%a zL?)^{5ENQ`OGZRrtBzX0(bU|^ffd*@V$LAmr2pjiW5>w{2W60$|*< zDY6B^Fb|8hJoGM-GvcQXtp1dAhF!f5{#cCHh$E#+)`iE~3 zq3(QJd^E=Z$}4y6BExo_zy<-NAx0gapl&wyIjV}36nTB6w_Y&yZvkZ%O2qJFnyMO3!#kab){P4?RpiuK}E2lHcdf&*Dniz7( zI9R}0vf_9zKQAs3Yo1?Kr`@4*=puD>=6a15HbLS4Ez^YY?r95Nz%Ea5!q=PG1@Zyce_1bX5NnooQqE+ zw}L%kFwaR8Y`EvVHTNGbaY9BS*1(rkvyxJ5R(Cgr+PoT7;6HntB6Ph)wXzdmYaaHMd$ zM7ux^Pb_yprL8D-�CuiOtukD;Rm+|MHq0Z!S*MA^I=-d-iQugHb3tH0}3!{F3QN zy?)rA#t$C;_>t-@Hhu|fsqN*84-wS3gy19xlz($bSe;w(y02Sk(nRs~9NF*8a_Z7@ zy^8U-`;9FPL1%HSQ6Csu>bF;-20fCGV@&!VojD;NIWYl^<)6AiBPtbE_|!yDf65!K z%aju2C5hF?100l`F096ga47lg^3s~R?IY;Jv2jpC+ik1OLErc43JDD1L+;LBS4pZL z;nL8L(+X{?A!@k#NC7lXd&?Pz+Ez>wdyHBM(f+W~57~SiMInS>AYwkwAy*U3;R+;R z=H2;qW|K#h7>@hI35nGw8jaS2cAn)U1FpmS3NvkpJUYY8eE1ju!e2Ew>u<9n8#agH zFGy@M@{R8u*`~5Nwz3YNTSNqY5%*mm8HkJGha6zWP8_5cxL*-Nc7r2G(m<~r)6Tfd z+w-V957*UzPuEPxaAGw#@SKENKpaO(7Os}=*SL^N-@_+}kRsQJyWRavn$Z>~dV`^> zHZezi@8j-hY&5ZQ3fkq{2#jV5zv)+S@4tNlf@G?DZaEr%oq3cjuovb1Dyz65R-AH) znZ%jq`|?*!sZ_?XRWb~gta7lG;uDU}pNp+NA7Sr`vR%i>FJltk(U)sesvR>N{VBmCr7{ zDO?b8lU-sL zwAC*SsK4T+NA3kRaGlSU)qv-mZmBQbzRKWgTo=s8pNoo;7^hV>$_`70-#<2ilss2i zyJbVn%i$!LQzdb-c>>3BOpj6>a+5jlJ8w6O^PqWvDQsfX$K`4-)#2l|z!E2kwtU-! zeVt^R)vPTJSO6I8_9M6j~hey{$5;z&8+Y@-TTqpL&ob^#W@$l0g z96UJS*`WxW>Mll8FXHli65ev9$ymJ??7&|P$j=R&WJMw|Xg7ef z`ceNfdD-BStRYSxLgB%LOFLQ>b#I9V-xE-bP6OMzBG`TJ1{y!G@?Txw%J}%}>=7?> zXAny!VQOT~d^&poW$SH>l8qye4-(KNEQP3l0QUF|!J-UZv`|9%BoZ>IAbG4z|7sV` z(QD+x3KgfTE6%b6SkQhAW8pr*H;%cy_ZI*>qEvd)jo%+70D3&_`8{=XswAOfn8$hr z2jc^;wJ2iTfLifAH>-w8RL?d-!@?BhbpiZBN|uGHINWd^f-37=+GCJYLj9&|QIr2P z^Pjt(bGHN%@PTSCfUciRYVL!lUG?r*m7<5BY--Tqt#RXE{Oo*M^VvmxJ!BLFt3jj21d zZK&JKl=xA$?DF{1siZV1H^2}E*|6_-+XMT-1)EqtuteJ;I*svhfNFVh5)tRrzQ`NV zh*k8ZsDFKH6CjU;{D0?H&xI;Li<1d9G%+fR$ylmUpsPx3$J8;qJdT?{X=5);9Wal$OYx*^H+?a4$>Jp z*`Z|Fzac#7oY#Ml&i!{4nhPPr{%kICt=jI=?Rh;{fY<#!@+z`doac=BGFbZ@y2646 zS^c6N%+!nHa=_d-W;@}g)Esa*Lc^4+O2i1L2lpyf3hMC(ysSzT6Ev$S%%JK3WXmdM zq5mCS=v$r((9z9h%-dvsDm!0yPH57fD$m`iG^_=)za`bQNAOcEVZjC-JVw;4; zRz8u-a6M}L%|Tt%Xp97$i+FXCZX~nWb4}e*^<5iEt?deZF=Yc>*_D>b-ofp&Wg42Tw>9t8Kscow zd^WRi=0;(dAz68m)Go%tS|>ed43HhswG;r=s0PPhJkV8y-8HxM=otS$XvG()?*!_6 zJIc=(=-!X67H+K0I%l4q0m9j1xXWmtw_BMOoyo%M9^?H_7tY6kI$C4=KK&u-%RFrQ zShG*s$98z&IVHg|c5{c~=yCP1Qw-?pdJy27be|N5OE8NDE3MtiVLq*Bzx)sZZ7_^_ zT+EBWS{l7sfEdXu9DpX<-xW|}Tjj1-&ayo={^%Gv69c;1_kYmNS3iPWh?y9@w*JoF z!4sB7Qwh4zVFc1c}46RbXaB1gqHo}K-f3v@<`(vWIY_2!I5ogY4?9ihkyiCyTm zT=o_6r2dxBSYfD1Xw66PFE7OTH{CAzv-)U@xyrOoTLc!nL_OoIUqvN0@n6>;7Qp^6P+oQ57(h5pRpeP zPW~Uq`cV=3{}rXa`GEP)nD>RsYGp3T<3I1XKmRzzX$Bnikt76hZa! 
zwIT*BC---A3(n&S`6q~eI3R_t9Q=Gw(r9>hz19FGxP2_CKuk*#p0k491oLVZx%O%Z zKw|0`4GXVXUc!m`73&Ps^!{M(j!Lk|%FU>V``1k`Vz{N>oYPJk6n1>-Xj1cEpU1th zk^zY$b?QmdF*;aNy-1nr@H`*M9%*|d3{}8uv<)8WA!4-k4JZ?H+G*B2irA`-IvvG5 zSk}FV$t2Pnmc@08+lTRdKWtQ^qAv^|I7k#D^_6deux^rwC>Se|11ApPq<|k)_mV?DzM$3w=i_ks6l9aiN-%g|d7|dHaHw%c(JETX9}&&9#BW)JtzLm*oXfB1k0 z@J3kgjj48;dP%hC)nkfmMk9-R`)5w{S@hvP>1zozJTCE&X@2&Uq#PvX_A>1-85b^y zV+@RWIpbLrv3pMb>7MY%#N6=Vlu#_iFx$TzJL?=(acA3*9{^or$!iR}?!NwiwLQxt$pf)`q(0Tp441>79_ zz&M<2GN++=2OvYM!YfZdelkM5XAD{^wL}v$ep7q$YBQ2$G8vMDG?F9v$|fs}-RaoN z2JTrl0J%)CaS$@b>>1%ll?r=61Jf;(&|%5YU;^c+*NymwU^p}=4KnW0dm!&!l2e55 z{d5sD05Z?M71-esk22gsqr~?Rm4!_!RsfHj@z}}yjr>W58Lq;c$;F@600Ns>RQX~*CBZ84$h4cm zHre4K#=t@D`qnqwVpfRi7Fg~I-yyO(-&uJ&d7&gPNXK(KTP=^IrU{X-fZ z#t)6Rky~#kVOwMsZElM`k<+#gFqg;?=@VcSc;=mGdxt?HDHyMqLo1{CbqdMlDv)3c z^j_mdkxgpzGRx@%HYH1%ylPXTv1o1X^W^r8%ASF*%@F)j8*@G{TZi}$P{;7mX{3TS z;R2Jl#E!0gx$GbLEVJHE^1$q8J)X*BC zSlN|*V>w8~#!xh^ymv%*l38G7cz|#M{O^)Wlm6^B{S)@VQ{=dx+aLan<@y6^?-DGa z$$NQLCg~c$PeefZ>*8)NKx%GF%F{>edW-0hqQhp>_rHg=Gzzd-5O#eqoEKnlB5G8D z6MYTBUwuyaUKX4(q c4X-7UErba1#hpdvj$wOr;_{qIEdT%j0FGn?fB*mh literal 0 HcmV?d00001 diff --git a/media/so101/leader_rest.webp b/media/so101/leader_rest.webp new file mode 100644 index 0000000000000000000000000000000000000000..8b996c66fbf371130d08c5da1cdeab0688d1077a GIT binary patch literal 39650 zcmV(zK<2+vNk&H6ng9S-MM6+kP&gpYng9ThrUab+_m zT`>On=2c%VNw)g!zvo`09z)^N0p0CXF8jMc{Dwz~3@$A_*oRVbfA2oECxV4-0qT~X z>_fs>a(6^Jlasn3)SR8s4y5cB+cymV-vI_uLdT?$MKfEdhN z*1EMKR6kfd5bAv5yNj(EttIXkf#BO2h@+Y7xh*Xv@A}eS=f!2F;K4PdOYp^SjF=Fe zcmga~Z^fe?whpFW!K9gOXuoxaYhYN8PYk=OM_)J9Ej>9WCJ#hDi{GibUkqz*L4rSs zBMMom=F@C)0-^+7+jv^qbWEo=w%jpWXjLWVB0M%*VWYUD*tKy>{!YPeE1T?qD#LHg z=XNiO!8Ub&YJ9dBu+h~s!EyH>zSwr1g(2?WIa}|obMXx9c=Un&2aPd=j5N!kCcQwQ z7=arP*lsNx@A&nkw2gk(?Vo3;59g_BwjxzYMm1>7X|nQ+^&Ls_abe+la&F<-l4zym zk{A<~p4FF&BpmEt7K`&;(7^U)@8SL<-%4ra*ylsIz`4GuX}Icg1K$(CkMHD|OF=VT z&wnfWXn;M%rpQa?i$myt1ges#Kem(O?X+QyM>5^dFpT=(cf6H`&Zlu|9OYQ7UWx?m z?bbTwaxY?o=UVk)n#v{gt^vT8uFkhFsRa)`t( zuLIoI8A=@hfUVw~R9n>q1H2{9HSfOr;30ErPFL+(K?}odKo-sW787kW!}_?n*w=qD zQQLq+-?S&1XSe@-K|!X+h(zrl-P zqNOZ8sdM}Twl!6U0l(-)z5Km>gwJ-p*mZb)QCj}295Nt}K)toLGPC-+!#9eIIQ6E> z+))F4t6~TMK$(S$4IN3z-J*4lUDQhf^>aPS8Z%&KE^S3@B&xU%z1F@NV*lUkC;rA^rqbo(9ojpCzgf@W8|ll&NPEWbh;ZVh&arrA6@e(c`P%eza6z&uyN} zT?#yRKagOGmw>=`?z+Xd7$euwN=RVvt;q7-|DWcp-P#ScEBIc|X11GpMbe>=-glK3 z4VJ~nQgVE(h6f`LY&E@(`^-mJb?KLgCdM9D*O%sT@JLAr_SNlAflIDUPNSkY!n(8> zIT{VGjU;J~K2g2oTG#mSpuwm}dJkBbDS~g`T$TWktuf}iJ&#c=&2@8H>cdFk-mIf1-yewFws)gSG&cBG9O>Rtgd^_0*up?jABws z`Yo(w*=(e+N5x4dMLF-`45RW3u3`UR3{nQ~eAv%M>u>ct>rKa)mSWf9l8T0^KU+nMRcF;=)+?T>1Lf2?|TL(JfhNzAHxh`LQZZr2}X^*ndv`=h}@#zThPC$aJX|cGNZV6gA#?z<%=<)DsBK03?SKO zD9;dUmjYS1-h5mejSCn7>ibHZbqDTy?NB5tSK7(aHF9Wuk_daZFQ50RWgWgpm_<%X zwz`Ptv8P$jW%Ms%Nx(X3Q|8|Dgn9K8&zPCZhwFL#5-;rViUwVhs+`BIB^ywhB6DEx zP*lZef*VDt$CSovPnS2=01#UhqRdqQJw9uAz16DS^YSdpHDh=|xZRU)=;w(c;l$fp z3=eK6Q{aD&f}1zybkVs9!FbkVas9ox_8XYD4kVHXiB`}9+fdEgc0)8j$h;?RB|@(v4?>fyvnjUbA3+3Ye-|wt4~kPKmzru{o5I(8ANvf<~44h*n zD`0OVLEeKM(S(|8JSH`f;+F%1DgOab$Evr6p=X@cJBX zof~ulv&h@`7fu1876GS4LR)+$N}A?iV@6*Z9|m z^V)q@ruT|)pG&gRtym>1;w0Z(X^p$TO56T9&x;L=b{?n!&rQX5=y%5^OP_g1g=-E@>UCDia*yCB!dzyDT~i)00&yAL~<6ef)U{ed`b1Ki}|6?MP=_!~nTozHz< zJNbasL|6is0i+N2Xw3LOT{ENDCcCF;o69-L!oijZ1K5JBeF|fhLTb>AZQux~ZDQD+ zBEfT#eLH{a(M%TWD3K(!qL`2a#AtWG>P9AhFAT6P=DAd6Lf8N+=pg)k^Jau}jcI%z zcq@HU&=MX6+pT(me;*9U#3c9DHXxi)t`8&LHfU#_q=6g7|0g#8VEQ&(jada|bk zsxmKLN4ySgO>e8`%*3I;-H3H3Cv{%(prfLwg|)ub@%6~@E{wKgciP9~2ZVLuUA{>>?$&8e*UauTcNmitaA3-=P^( 
zFLoUj4V`$1jmoZZtB%bukhdsl&GAp#=nqt99%4eZ&kh)&L<8`cjZf0V8hxPF2t52x zboYt?wxj=?8{YAC`DGWEI!=^cLFp-HSz<)w2Mf{d*M>+hT>0Q!5pNH|n8NA~qsqK< z+(U5|d3z^OCDRE0j#}{!Ya?Ta7!QyJy$6Jo#nbmlS99A|!`P~r5*q|+by1LgyX7Qc01D zK)oO9=H|}MCGa?eadJDQ;14g`z>wv;y?h49pv-EF!56G0m#XLUNQ_Ml z_}UAoH4s;wv`pEyY;W{?crEm!;z%op*1A*?I5L&f(W|>0*Mm$si`%KRY#-4u^QKhs zw+c*8=ARUiVWn_#)C+N74l*kF$dL7fcu|RGNjOcM13>GQHxsVbVzy@!{cBn!>n@L~ zcd#@fj=oISxqADAKtu;WAo={OG7WnU_`r!5poW9D*ZWa+P7dk1vBd&k+6rm-T&{Sh zle1;53=lDiZEb%=(WnF^Lry=^k%B4w)F&`Z?h*g*wo^bi{{8?<4sx+*GGu?i#3io4 zF!z=aX%)7CMeUZJo^S5`JKx1G8?xd8u|xZh(YvaM^JK?B4tV6En;AXfK85evRD zQGLp?GlMezO;gsyz!wZ=#z9Xw9__L#WoDorYT%|nqdf%F_zn35E^#Kf=p7J;A}e~M+DX1^4hHkQ_1-*{5(`&Ku0lCF=&O6_oMtOb zKU=0<0Bup?s<>T5UQeXBjyRAyXGiNSbCLWHGBW)_>!HIj3k_-mwgb*vHrwiCy{zr_ek~Zko@K&+N*CM@+VZ^ zg6A%%RVhj#oB)SvT+&KWvEVL$9AH|!=)M(Lb0H`!2v(~xQQrUdt#TWm%Z{JUtLT3_ za3tY~hLRp$h@yy8uv^tAXPY*bJe#^PV@4-|Etc2LmK-HX+3@;pIetn_lwf6sN8O() zkJPUc_8;8mG(xTv-y2-!!3dJo+ol?IyLx@6sDbF;{3(tlce;U(X+Klb?H7ODKwfC0 zqc?v2A=J_ce(GT5x`feGl8Cub>gJk4JHu2KU4%nzsS{mDh)ot}XS-y8eg>rC!u@7* zR+D*^XC!P|q(v6J5I4}9FJ+yS9X8M2g8IGC1e$CDCg%D)Ah-(czdvcfX_`$@$aaT;rSA#Hp>a0GC%KbnBe zZbX9v@}lrg%Mz@lLGz$p z@2HtRF(o(BxQ^CD7}`FH0h-xI4zi<{$cal$K0#O(RZ>a-P!OW=62?&Q|sjk72M_x%c!1Vfik?Cj3+DsGG)-hrH zjS;+8N*S+hiA1`4qKukC6Nd=BmFb!cnU!&3K352;?xWo+!Y2V4t!R33X5UPymN!F2 zLn8jhy34kBgKLGY^T^^Lg8LGI=?1nTP5pQ}0S8jHk8X;0YzW^I-%6)hH<5WoZ=lo( zzsj+OH{6DrGSZ-_DcJKx>a-@7umRoSZZ)L(E*zwJ> z-n0iZo0_hE*icgvenO)vMWTD|1plSKgjHE;8}ItKt6gcan#mk)1B7^sVk3(idVPtS zIs{#elJ-GbUe}#V(~QuTZ4YNN^IMd7Y@-)ISbSd@`aq#CEL;7YP35S}xQair;`RZDO`3#U+xO+mG)p;F} znv@mx6@Em<@%EIexO}a;$i7RqH#pSwY|6s}#wvM*jwx9CHw`I_Y3J^Gg^Uf`Nt%rY zh@oisBXjcKFWlCj^wuU0jfnkh_^WrH7fGKZ$8i;C za6;#ed7Lm~%Tv#o6lEbJXNzCx;*g0*aG)nwR!7=qR&I=Cqf+s3@lq+gC__*f%dMO% zekfoATez9=20)(Sg7w+2`=}vQ5@?Z6{cM$&xlHWQE;9EdXL^tVtXJYEG2cD0?qR^w zZxnz8lyhRa$&E^IdODc)M5^uYWhBa^ZRZ~um;Oa&p+ZjrUJ){m;(IXQ%2cmR30jt| zLPlH4Zz-qH^Sr9^3m85S@9_apZFT?M-Q31MC5h>9Z4u$yO>|puK{rvg!V~eby>+iv zu|P{E=sL1?uek^Y`;w=Qr-F8V7U$^z7CG>g9wR79H3W*P!>D)|L@~R3w8h?A%orMz z=kq2d#qn9mf8Wk){5bW!bOeL#wD);&Q(OUPy;)dcIgex+4xwG6BSnyujh?Ap^_&6uB5u)o$&|O#xF` zY^Z$4zxPs-rwlq=Bbf~5C#>LqGarf>N7T*MrCxMOxK&TO!4!GJ9jES#=%&=h$kEd{ zxM>g&o?}q2!n-!H396WdW!<}Khro|n9Ietqf6Mw^^j>m314i;G?Gtweb@@S*so||B z2i?gA>ln-T@LRR{LJ#KrarXh3J^oylL(4QWY0<<4orK^3=%~)s_ULFX)(W9USFvRV zCAz<{f1F3gqU?JXo{i134!POf#G# zS=4RiyUQ0tYbW`{f~nGJ!Oh8dh?so(tM!;#o?4uVBuygKK&yZz_Cfnwb}so)znS~_ zZkZNiFQtLrVI*8Q=s|J{!QzUw8zB^Y@Univ(9gqvKE;qlxfu15KsEYr_?a6BMiUpC=$xztkY*x zu+}2%N=eKbg2u^l>h&nSH}IRX;|cL$P*kO!4fSNvA*roLA_LEG-?C9T;+)BI;5@VN zHS~h)n}a=mu2BypR*gpBjpseKx~h|bV*rkWLpaZi$N&~H!fe)y1_;ra3TfW%BtlrE z=O$j~s61(xD*H!T$x_ibDveK0+Edd;C&g^&MqSVW!r?+Ny()owMWh7)$vP6#qxvH3 zPlBuC*7OyDcMP8_)0G;ERumSz(UKBjkb&Br1Lo#0$eJ$SQvzA}!I8C0mFi2rk6$iY_hr_=!TV?3e+#Pj^lpB0flWQV`=2oTBf>1bVs-fld=VdQr z&PpMhjKV{#cdRsZPucqB41@C=tfe#%tbS8r6E9$V>(a*~BF$<_9Gwu6dLCz<7VeFt z*}><6zH+X^wr_IsAIcYCU2w0?Ln&XUpSc$h_bpmldIug^AmkzBf;)uol6CsJX#U1? 
zM^QGOhe!n?pA0#51Z1C{)b(y^KL85m1tZn3W=hvg=e0M_hqF?Ck zIQysR#l%Ba7IOq@isfe5e%n#YIeSh*NpA2C6IHxA)kz?fnFO+6)SBWPfqP3M?48_UMGr@z~p8peUYd?vN+A%4Ckg5F`#l42d%hP^;6*T+n)U2%wDpoSAt4} z9ME=@MU?++1+ZXmG^u*aK&Az#P#I{3dSzD1jD*ZUF zu+Bi!0Hi4w+P6Dy0N^7O%#VE3m#SpCBHborK#XU2iVdVFdofnF_Vb z-;$I(qAeQh4ZT7^j)k4Hfc=S5f4Y6?8W_0H4-QEwFJ-ro7S0lY^0)Vr$db?+885G^VQ!If!+_|njtH9I;^&38UP6z2zIX3~L0;J= zLiaI(EJ%@NWZlS()7*qKRZh1h_aBQ&q?aPf!Y-o-wto6iK2g2c-I+sHireAr42QhR z)l()+5Y?~;Vgz$0mPNDz*<9zcRk0FRJojYRDGbNH-2ZSqVNXqnzSIOv>Oq>Zu z=nT9Zu3(auBhaE$M1*7*UdlLsafl^t#LU65X3JJIZ>6fiEj`y~&W-kZz++oT4Vc;T zhk#6kg@-VzG=A6kh5+%epo}|0jm4q~Szt#Xz$m z&L#IM$P2&f8k4$(%?+En)AG9YVG9|w7Mg1Wr1cFxZ(>&*+-sRUe0=+R2>P-#zd`ED zP|?;*HY5qG0Du6SQ=>Kdxd8<= zgyqIHNXxLM&z(dQn|@O-K#-Pxp>c2Jug&`I5YDw%{yZfvA8(fCfpw0#Lq?B`3^c4v zL$?!_kr--GmIJHBsk#QRH~y_X zzT{<9H?z#`xP1f}G>DEr#1#RWARUY#$zmwLizJ9Jd!z19Xx!~a_57c5O_94Wox*Cu zx<4bTr7&*amxBHtN1o86+yXja1FLjm-EE~?71zgv=`+6sD=0|MsZVG zY5)2JRP2Y|3gLeEo>v!<)VvKTBpB3#`9DA!ekRW#yBBXKFmWwgLeK1Ko_5o%LV>n&=Z^ zgf{E;JQKiKjpxax@u6^R_g)2_w#8!O$t1Rpv6*^HuQ}M-{R*r;>}HT|ZcDdj z$NQw87NFs^T$KvLn5vozPfB_}2tn>S#_9DsT{?Z&6OlPPqWAD3UaX12Hf#Os#a%MS zX{~4!wc}2~-Jgb%x%TH8ssFw77N*9?`rLdckg89E`M~oO?Byh~=qgZA+|L5vAQ4kF z#bJ7j+#eyINL0{2pz`&{aZ#yNgMErUYuxTo#cY@+pmzM*Tx*h4mZ<)tUI2URHq`S7 zuFD<9ZuP#Hg-EOg*D79o7XH0ScX90;9Z4?C{d1!tO1!JOeF>+SPeP8DOT3P#j-DMkJ?1|z<^(#mTCnBY9t>b z|0IwDkWqQ{LIj!(R03Y!trVJ8XX`UHalY=;a!(W^uWOMuX&J0$o!ybxR}q)Wi<`IK zK=dNs_Snu4V@8DVhjyiUeIel@_R_c2Ej{MBRMkk^A&z4^Nk$o<{KbYDK%$pNSw8fu zsufSyr>Gg?1-v^ttP{)h+21(iKWxT> zPJTNNmZ@MbLO~un_kneH2_HO#MFoWQ%XSh{cb$T@+GbN!No5<0aH>qfr~ccTNAlt7 zSHS0;sWn=Mg+FToIn&gUqf4!k%TK$HOCPK4n%2iGf}un3$;sUgDG~@jSu%mh_u|~G zmR)9KdC{&*z*C{N6Nb$$Yg$LPM! zzq~nc73myrC_zkXAN|__cP^?y`yS~G#Cn}5rfK`&P>%W%LQD~AvCfMhV?2fn51bb< z7WCU1YZ%;rYPKMHzxJpnCeillO0lj#{!97(Y;?Z%FyEi;s04L=<0mwe0lM2A^8TT} zs=2L1s{q2)`d|IE1gpk+DPY1NH$Tk1_N9K~dSla_~4lBq8iNcR=?# zT)3Qf5S_EQtHJ1@f=2+TK*0lNH-BVwAw1hXKb>YQ!*@hng4KqTum1UxmG^=w%T52O zIvf?hgCVT(MsO1)xH0;)a0Jf%Z{()VLo(;AQ`6JE?iaZ?yE$kM|?bw_b-YE67H zwC8;~Sfo&ruyq-joW=K4{j?x_1L~9}9}oZ?V@!XLj+Hta1K9;t$=osO9<1<%&4+NViTF2JBQw9J=ek?gV%$y z{1G)Zl@zEO0s{vtOj-->4Md11emG=ILI((=E@l|RDe2i8oqL=?SAd>nY6wuXy2qZ= z-?=7v3O<@(XdnXv#8v(Ae8=M);Oa+5jiRpreVBmG%R#o>C>_%rQfPJY72KW=U)?~exj%}006mp& zp^_!Y;+ZH%J;}SP7~&bacAQ1DtYqy3qg76#4W1qcujfhhpl|&I!xRk6K&$U4T+t^45>Z2~3ByP`c^74% z6GQ+zb}u9N00$~2lqC7$`!3LdCH2(Xf+GwZPMJRe4}>G`=y$qtF>6Yf3jr+a9a>_zjPzR>9_eGNEI zc8}Y({P&0Pf4P|c5k&DFEV%C4bhd}IS7*83(F@(A6|QU@`%^Xl&OjN9AhCsDDDy%* zKtcy#-NM>ULu^AladILm$+@#4H7_15P@0cZqkrAu>q=nt07BK$+ z73TrAy%!5qfN}Ba$SmNF06t4AmC)8A*AQR;-3kmD(nCD&qBAmg02zMc2Uvt4z+}rR zzyt)2V3S`^<+uV8`C&mw0mbH^vfbi1zzBKAtbV50A`}^|)`6jQITwfh!@|ZE4dA(TE3JCwa2tRk)s;b+ zj1e+&ge_X57j{y8BJ6gjzgwrIUjYvG2RI{&4X3J(^*YUSI*x47MNM&)1w0QHQY_3u zMvYP9^@Own^=B1pAvXmCb-XO0s$FGqOeD#0g2U35e03j+emuc&_3#jIf0ku=l1lz1 zubS_ILG`yGZ^VK3WVJHSzZSk#3NGlf`kC3V4d_m)G%!*+WkARgO$lGqPkf3FTyMh@?ex8ABRSC& zBw$rvD{;#Ss!TJ6WCSWGtDfL7+khFeos=5Gs={=>g3uA6#Z+~>Xjw3UbP7M@UO-)Z z?zxW^*>QeWDb>6XwhH9JiU9s^)_?=v3(Vd?b;d{KnPLQDWf8&qe=spITLbk3k;AP2>Se z>^GMcjwVnt8mYx8K5*O3N-?+uMktEcd;k?&W2KG^WN+NuSFQwjdIYA05FcL~ObDhO zztw>eb$XB2Rr)})g^Vl}J$#us@B-gx+LT%6EE6kJ8WA#MPCW);XuN^*gn zL|-!FX=_qw;9IG<1vu**7$b-~ZMIfhF+=w&a=Iu2=!DNK2Bx8bRwg_k%4!%8ghylp zTAliFe8(5C+qZv`a&?6$6a6m1Uh@TSejHp(Qb|OwS+xWJtgq+Xz=Axv@nfrnj2u?^ znD8Iz11PPF6$o=P!yDmM&f?Z(fH=Jgcv_Umo;R?qoLs!H?*JU6M}~3GbB<8HS$zpd z539l}&7>g_ezS`8yt_}iF&vD&)|^^xj3ctNr-oT9JM$Zh3${h6CNWN?)7FLsgEnFm z?|AQs#u^?%fd`6orH;6!_ZWJuNl9YYS^mVa0VM8%0XX@2XfHD&w7x!mcqDiSq=`#q zx!+sHu~SFrq>?7!HU%v1^&ke_q?VjGGfAFW_+b`k?26dbm87h=zc%TBwZ9;k 
zp_xGi2Z)B0GyH7ZhEh=4x!7V#BSi7~R8z1pxNv|G-H%|Al->VBp` z*#Y5Jr^*`Ekq33{n;Ysrk7)>aIwfhLBpV}gW*ng88uwb|y~|um8iy2<7?&e*CiQ+rW}$%_Gh5-^1{|9A1q2ru?bNr>b`kZ(`W{vVS_sWJ@~-0!1Ty5Ktc!# zz8MSvGj>jVEL&qE5l00Sav$U2d@PVnJD&4q(=EF?v4gW%X_7k%>ae+eM;+J|9YNq1 zunn%_d8T7Q*}x&6r&FhShOp@~J@)^BW8@2*0-Dc#gsCcczpQ(Rk~0bL+0ZTft*WC5 zqEiTv8RXOtwvhO?qNp(3JPck}pSHs-o5E{Ndg8m%X{g4$3M^MjU#7v=K*5HEDAnYD zYp)KX$=2vY>6$NqYT+`;oGcL4OJE^&es|U{N`TbID}bg1e(h_$LbSNFPh+g4$4c#l z8{fa7%+3pj@DjvXxV=Vdm&+rIK@aHfq#6=3TXjhkl<%)qo%AyQJ4*45v&j>&|F^?+ zLK3XmfKo431k1vHRB^Yp8T3&GKq zBbX3FOGnG4J}0{~x8rpWVm%IC3x8jqi*m%k$Uxp5I|BK@&4(~P8fVP>k%Th{ZD!Lx zY>Oh-I=X;==_=VIzvm;sau@p~89oRpw=8M%n_lbKtp?^yt|W$aRu}aWCbr&}jP)MO z#3)1D65W@imKrWfD4oa#1INoyQEruciAdl45z$PbfC;cJ7*w{(ElY%b)sIC{)k*#l_7A+XD z*uGy@+b4-~veu9)0gcQ;nmqzmP^6(ON5`P}nM=j|zrgFC_ z2G5m2g`w|ta*s+E>K0?N>jsRic$BR4Jb%V{gD&`cONq$JTUtBhG=NJrwoH00@;bH| z88i-m2yGijI19hF?;%SgxdW|3U2+uXno`F>@g0X{gkBz_!fs21ET=HHxKfq08WedE z&ZQyrqRV2ew)0$QHZ5a850LXb>cQ5jGTk8u*-?|07mcxyEj}SIQQZ$PYXu4G{;BRW zy$#HV)KaX!w`MHv!KbiQ`NUId#8ORn1h4}bco0H`UmKoY6ls*b+YwRHwzeq`{w>f~ zbpjEc5Sk*FI@!T^+(4T?>kgN6d*@!|P75|_ zd%k7o@@5&xdcZg|2w9WM6Zd6B^iX|+-eEgV{O_p7p7tA26`}GFJ+(&p;JqwhU+hjl(ptiP4*=+srz?LH-~CIVY9OE@B75c0c&=Eaob{_ zd2%h%lN?8+%KUTd!+kR`v=)Km*H5`5Pnlf5t&|A!0f=9}MDjkbs(>R-NkJoffCib7 z+^i-!VFr}LsLENY$S(tJbd-TcI^5}9Uu*(|D7^!vyhB6hJKhNWzN|1rg8`HsU2DR~ z$C#iWlLtq@!x{)+Ni>0v+=-Sx=(Z4qE(>UQH575-8XZ6PL#rL z2ph=-gTH8khi2|{cZM>N*>lA-o8m&fB_J-_RdW6cd!1Y3kGSVN(XvaumvFgR8LVNH zo;zrXRUqu+?c5#4r2WGi8V;RZXZY37cp6LuxR^=-a-EQE>#2S@E#y&1!7hhcG%3lq zv5#xnURA@9NN&c>e}gD~|0yrJsiDO*qS$~~+(}%E{R3wf#{G<;xJaDu3EYSk%~tN0 ziOcm-O45zp+fRb9w~1-cOgzmM>jQyT4RJ=(%|w>4UY2=!X2aMDC2KDX)z@bDxlImk zGRyS&D%9+QY%z;Tm5Gd*;yn$E%GwBbqnsEk>Ru<##AC$wjPPZMtX04y4Fg)<+B|3N zLM1wt{ma%8vhb(`hDC+(#4~!Xn#Djc)#S-CN1g$JkXGg)01_g~37eAE*7bT}oIOq@ zrMc%h{evDWT82CH0n8RN%*2TLmy1JYS}fwF?UM6J-O~Y9D}c#P!Kk|KsaK&=%7)nfk>*!(8gs zN+C%4D18MEh!u`ul*D~kQMsBG0Y|tVSehPCS+}}n3ny9%w2yM+^IOU|jpd2W;0>ZQ zZA^ctuky`ZIG-RY61An4V*Um>%H&6KMX`P9t*yt#8O<(XO zc-X3>%T$6MzsAg7L^p{6c}R=emi}l!gYkXYBNL4{#ZHbR7`|?98xYHyYJcPq2GKH~ z_WuA@OZ}Z%Qrhsn+e(W#ls0MOlCn?1OP4NH^DSdxsj zviGO1QCR9h>(Se6-LZ+nEgCY$rW8LIfLTykXiK*mNc|~n9aK2jr5%Af9gL-_m8>?t zvB8zB8Nyr3Hx#2LqbZR6qwzO=!sBt7$VfS)jKI)s8O1rMZ$DWQC6Ik) z;?H6==-1$f?lM1dpdTbj@P$rXqMiD;dlP$UJQ!I)_hmzQ@soQ#I4hnd+dWMXnW25| zp}PBl%uc^WHbzCXVubDwKgxhFOl89%`Y3Jk^wRVIy<0v_Tw&Z7*Ln@eTGHx~EYHc-Jj0Kkrk!%Qb$Y*{qR5W33zgVSI5Kr7?>&tS2|tp-^yTlR#e@?qAry#9?F*MtV{SIG$Apm>dJ z+@}qb*4w|$m8tQ!tT36zMOH1V@}5Q>eexgSxf!T8Kus(o1=06D26_7zJ@n^TFuZTj z0RF<20MNHEMd{wvq&SOM8`Io-6++irA|Ml8cC5~_Q%4shQ`)C~EufVuh+je^K9=Xc z#NC9WiACq1ePH^ofM{v-a{+XX;A^93DRBP9W-;a=j&&C>K!K9~Y2gUf;pW^KTIu0k z$!C0ts#jwVUxM<|Syl-!*V44TuSK28QDVVd5qA41M41a?lvBl*MPeZuL_6-ppz*T4 z7aia!pnHT$0c&BE;x~|A3YTk_wW-%=QTc^do8O$mh)uy+|1N+(%E+|B;PBEa9}mTk z#-yr7WJ_S*8VvBCCe?vp*#JM=v-tx+pvz~M@6i(%p5Ib}R)HPaQ_8 zFDRgB<`#2g?RQWT*(lNUC;-SUGZXDdG0cd9Q_}uR63%r7^ju?YdXwX{06a&kyAJDc z#$Y4krHJxvj|$Ed(FD?V1*0>aaEq+RD&z!s%ZA+i7IpM zrhFQi3X&lU@+J9Ri6EQWror}zmcQBc2ty2df#zNt)3947J8I~cCI^Q&^Y$aoh3g(q zo*`YrRR+>;e|z%6IbK@?e<+SeADZUeiX|WR!C`7};;Zy#(Z?zn;&M=+_)L5lE8<;T zz083eH@}e{Qw(#RZg>xWp>ZFCB4(`ZCKH{zzjjChZ#2r=aTm1(T-t=RCm<$13qG3E zLSy7e1Fzt;5y?6CC=HGT`V74(Dp zIBB9R2tRd`3O9q*|IM=EUgr;YH+%OOl=mGSB{4w31APp0zk&uqQ4z_Zuz;QkH#(TX^5kSm`&_$~C|SjjrDhw}umKyC z*gK*{q&ATtx;y}rW3(wjoNBAAA@rb9=vc-6nS>vg1a>sk&Ub4aN1~`p{e1C6&=Hr7 zQ4WAJcV~^3B!aLDP|Q_-$p~6}2kO!Jn16=?8H%t9a(Y|IUNdY>D|En<5G!d9MQ6)gs*S6!!k&FMVLFQtE$3P?Yw(q7oU(GLSP8oq zhDFR_1aqrnt0Uy5hYD@Tc?X3%vNWa#IZnxb{#Y36Q_)L3X^8c4HGj9M&W@@@?_)`u#C$# z!5a?MCOaDz*cfA1E4cchl8RX)aa^59)+Jvlw)hR3`R9bgn{_i2%dL;+f+l#mza?ep 
zAL?zl6i0*h^Z>W!q0a^Ktmn<4oi{>vlTc0I9LE@n0?o=R2eqw4Vz=$IVW2ZgA?7(p zQnt#F?EhLA@b;zMhS3WqJGFvoQY>0&_F(B?&_h1B*Jwk{iw}dJdBo z_BdD29*M-v(Ro435aO7IHFr=%(C$MlD^Jj^j2r7aWo!aS)`BVca3!x`LpBTIxZ?~Y1-a@*)n{!!ui=E<`8hOgd7x)#E&Og)vdRCG}a|n0d z7%Izj@DhN%|MLi+7S%$l*w}{zx5P9&{3JMxdcQ~fi3CUW&4JGCrc&XTRL}HG77ZqV zJAqNF{631S+|PnDgEa7Gn~l2tNs>?7erYppjbXu=84B1aeFs%M$y0F=4axB=FMgNn zu-YZ?Ido+gZx$}H)S`vl=fpCL8m^Ggc)e;f0we=WLU)i`7!;RVZ5b>^BfkGIAu=UV zj@8p zSq|U3aGgt>62|_&#U5V9&Bvv!TQs|n?UcgSzRa5LzB<Hs&eO6f)*-GR3Zd4+6;%ahWv ze?CbBD&hD{6_A+T`n+2TKiEYuINSBaH^pY`askF6+l4*mGO;^)eEtmAvzu49yqF@ z1yik&53P#*Y ztA@(q+ERXwA#6I`tA@{s+s`RR14KqUi(96c$IsJ9!4kB7be1a>Ys=mDymU(3`s7O0 z?d;4Q4qZLP+55Jrlu?D&D}I6eKA@WVrx2ZySnS&bA4Jf@%gTeY6l0DmdNPLU0+3s{A1Ch&%eFawPOrN*w4QlI!WNls za@ykDiX_I}n3>iYGEqo^-ApvME~sgQ-jQPl(7b%hk3WNMqOZ`S0(DK0{F`3RE8c2~ z{Yl)qHS9+n70)wRr$oIUdw*hSwxWTidpIYv1guy6zDm(zdPWBXmOl%eDN2V{aATe@ z$|Q0SIH0wqGJa>nl^=FHS$pvK>;|T>@d#en|Gpgv^UwpH5C85 zS0q~-T($*mRpfF(8`sXfSs#VNpblG$=SV$LF)y5FGev?6#WAhW1Q~tbcE^hVVTq9- zRZDdP5NppD4N?fE5?aHwplG{aM4Y%DS>`FdBrw{8TPH{JudQ9bPy}T2bzU>(OlG{`GdX4 ziW3*6eQc&TN5=T=9_;Rs4kiFsFh+-zS|}ipAe2!RlWx9Nzkdy6)Dj(~UV+h|sw{2J z(FNezyy`dAr%Jxm^+WKdYd#mrkxx*TA1Qyte5a?Z0h&#ua^Ryb4ew->H=XL!d;Bj( zpyl~|vL*W%u&I3qC3l$4W5a)0ktzK(e8!tgzFRo>ZIK9D3S9&oV~q7%om?*MPiV)Q zKQtYrKb@61=DNa}lz}q>SOpxq-d#+6Myn0Y(a{HK4*luj9g54ZoQM3uvg`uyNnXTW zd#xvLdg{IXDgmW(_fB~*=rz)$|Mygvyq?u&uA}BdH>Hf;uM@V9`%3y4;?<7J;tcj* zI|RCN;+5|-$NJi@;6X1P_lw2_r2m?|`@(BWfPJ&Y%< z^Nz#6IibY?T9-&>MM33wsmrfokTU+&Lst?;q?Pt-r9~}R4P9%FGNyinEhdMe>6%+Z z;{SWB0taHx^{^uttjF<&9@7$}}AxA)!+c$YK$G?EOHW7FKJ zIP+y))pWyRXi&3XihOnJ(S68?k`LATC-Sr}i1hsyb`P`co%Y|G5jC)(k2X(r7DAgY zTEnZDh%pCx_E*MML$WzKP(sw5BP=Y@VUlBC!lKssxc7lv-Oj!XxnKl1U?0_x0F+$TE_@Rrt*SR?L`62*(%2Iilk!$ZlQX z1U3{&)H!l{$hd9e{Pe0aF3iOb%rLB>c-JMnx%)ATjmyf5hD3A06-(G;PI#DoMEv}8cY3>4hMd|J?*ofCyg?}VIoOfg~Q;NmNEy;(S16=Wjo4-eq*9!A52kHa|7 z+t%nh^lauBC{$=DxHwc@aIWq>U?i4oyx(2 zH(6htF#;e6(KzF^_xO{8ffbU`hCxr>ndR;aOG;RDWqge}iozQ)^U-KY*6TANyM9jE z#f+JtTnfkpCD8H`SpQufs2+*a3+my_Ui18K^K+3^Q9m|5G2i)MLwet<=v%l#J;K;i3T%%Mg1g??>xPu#Z~r9XEjdI$!OO{l6OZ0 znL&D%PwKr_1bnC7A_$Iv&V13Sn9<^Fw;x%z1)=i&kP8__U@KpdTWjqYKJoCZ)=CY` zVU8?96hd!I-_`T*esVC zQ_jwm)CK~%J7n&WS*zS@?B-SJ<%;^AzJnl%^W8M3nFdL@%3>tn1;~UjR9>06p&*ux zvaCgj4Tq>4#~CWem69T!NBggc;pjh2}0?_+)zH+uLVbDbEBNLRkPd zza8XFdsvq%(M`Y|lQGEsk#V=Cf>a1<`XlT>geqw*LQEq?$Y9$BBy5cxNPDRrx=}8H z0<;m0t^I8ksqb9%*FBh6w@~98JS=%*Z^^0So#V5Ymr>39vio0Io^OTOn6&;lVGf92 zZK%jUsx4msy}e@q&cPHG0$nP(F8L{(LkO)(f>NtK<>D^p$x#*=PHiPBZK11sBEF!& za~+Fr>xSQ6GQ5*kmH6ZxfeKrSFRo1mf4^XsV%DKsmL~@u9h16oC;4WTrZrWF@9NMq z14q4gbV-9C0j@_b7m5{t<^og))rO&vg_C5iL_Cn3iI@vgRGiC=?@@CL^aOe}P5W(H z9yKPi@E5)!&4+W%9=;L-@iIDVhFmeEUBcDc0-Q>A4*&ryJya+_8}?nY-Hs!)7h2h+U5BvlP#itO%x9B(sLQJQ4cmQa-^*-Wzee?ySf|IgPy8j1mh_KO#FX=A_Z= zJmporp~v%m%8OvZ4ozkl0LIQ>k)ceE{k1tXO9#rR(Y$^X&7(5SPE>e@*@DoyUsyt^ z`S&!*SV%8RMJqUUulhv=jTNInoGIrbEhCb7Eyl$3RoH)L5zth!JJ-5V?+Dmjuvjr)o(&bHjo5J2e=Jm(E$@ zOq1)od~35(qUo@=3<6iPi@>sEGfj$GLOjd^GNS}OZ@wX~_o;A}7~SsYLRh9&@#kEH z2iPl26LdOK1aSYLVln>wHB7WR_OSgM5h+9Vc2APKk8%{*G>K}E-&6-a4QUMiG{EbA zcmT*>(^&IG>V?yqco`_exhG7nP^^al+Q_5dCFyl!Xr%u`xXAY6-5^QOJ{56xg+mkf zB|Uq*!w3~!DfzcB6D%CXxMNm$G8XY(Nf5mvEEW~_r`H{UpOzhtGgqUmZB*K$)D4x7 zB)TB}&qPlyd_k~dl{XL${N0|uhM~?+Qu2J&LaB}eVDG*hfym0*G|F4eWE|aL=GeF} zNGSm4s9vlgSXU*Tn(_ zKzTy?b1=5=rU;xl0_&&HQT2+P!2qhCzM4ST`H5T6y0;JAjWEKo)U;Rh3H@^@iiQ#K z!<#PgM2F1{2C!YiUIJNwI%cWZQ07ZzVD}0NI|nr9bU5Bg-26@Vc#hwHhh;8ay-Rj)+njpOzzs3y zKDViaIE*k*K!`m&AK)HVu$+DERC!#SiSWA4#wh{3)$tGOZ}}1w4MeiiW)-aT7O!SY zKhKos0dfp2E_Y*h+fy2Ux@`(I_edJ$j&`{#>7$PI06jV*J#o(dy;>(vsE?2QH#{&Y 
z!EXkWDvSxdKt(P>ve}xg1H5ZYVb+TZ+%~MAI27^qMWb4JYfg}G!yMGvs9OCb3YdM{ z7&>90G+d$Q+%eq;4WG^cGi*u(F2p0gpV=*Ht_O@^eP0&a)^yfBv^Z#ZCvlbEgF82A zl3;t*=EGRs0`9o`b#rae9&k7K)d8GtLcc*x>wJNwJA8YKCf>fn9|YmKGw5Q?FjDnZ zi>%C^sgZrC)EAQRS{Y$IAWFI?g8x!1A>o%TvJ}l@FBl0zq3XrSg#TY)$Wj{IDovf9 zQbII^3jHkRF~h^_e#AzSGNg+zYLk(;X=P=Wf5#C($Ub^XPlB1Tx)roc1a;8{HEsNr zIZ!-k*jB&#@#uXO?9cFiZVpN07+_!Xdr88~c^nj{ryR8;qhul((M)FKR%uUhC|#Pm zFS6$zPWVDwBro#qE;vXd6!$%>4nBD3;oBBv^wIF2Z-J8ai$-zzNS43Xl8CHQoYSyU zXjuXc2J9H92Id@!g_i}*fW#SZ>lF0hUlqHm-m+$ zEDq)LHk>=bMr+co>mV3SDn||f-JU&CRNHOkiGLt0UXqP5oX%ji08*F_xWv6Q=vcdM z9DIc>s!1gl7?2h9VyK1NO-wXg9dG4S4{`&9Y> zul{AaCV_5m`S%OIS+}$jPa9IZo8=LRH0iS4#$c>#T$0-GQv9jZS=P74Q=BS{TOY6L zH}i64#c2*w$fJLBftTdXADzo{bV~0-nYHu9Rd9Wi#Co09?r7=YX`{17LHDXu9lZD8 z)@CELIMinAvtW#Lt14hZ2mZL@x~6&;70fO-Xp@s&8I$`3OL>*TU)zZz-T9Q$*Z<0> zDH+U_5q556?Vc284YGz#*&cNzS09|LBX*Hx*D4!m&UWE0Ej5lN8RNgx6Oi{L9=WSx3luyq2rXT0s<_^_$7$- z!Ko-6&BQ@+CKc-yLHmjXW<_~}5mtoa#~5*zg7!pCTk_WZi7+>kN_e~D=aT)QbyDqf zePr1893(Wx3Fnx$@|#0`5s;lCC8Ug+zm(1!rnrHV|qUwOXTb4 zTSP2Gf{`lG_>zo-j+1~f%SwX)z76O;)d?dXFk$7m3=flZmZ*}kPe;%Es}d`K<@fGS zUaoj~6Xj}<%91du3pihFp>@+DKTWof)K3ZOB2F!bX*`6K;Z!WEYiD2R@WH;ZvnDLc zf(*e10Lpl?s9LCN0A_Z8cQNG1;I2lDc76m%eRuonT6e`3OH*%U_3i*NnBodqFscNb zK4!oVF1o&XgaYvYt4`532&h7AKua3@RMS@IPMbDn&H-KJ3 zWm(;6%>+&V7}8K`TOBc_tV-A-c!_-98ay7&zhwn=NKmr4J~4Yj`nlSEzR)dd(LiF*PfK~z%uKm<)}^Z zA8N#{J(gvxzN4{KaiSqnToqC!EB++9--opeBG0fi2BJabN$L8uNUnNiv)~gAH=^T+ z!nWyG;u0!DDXNLApN$i4`(G1~_^!jGL6<~w9iGoTjY%w2*oZz6L@x@@k7!@-4>k~{ z9N;gD;Ocd&l`7E{_vhnm`QRt65GNvPwUCdXqJDmf067uRh}6dUn_^a!5mYEZheAVE z7J>8|urD#I?uv)Ympl5$d+piG8kDb3S&wwrK3HboLL%%chg-x2^&nv=GN`%!bPf`F z4X9~a69=YSbRbJ!#7LKjClGAtCs~|3_81;v{C#T@>h-x&nGe)knM~8R+>5LhFM{!! z=~^kJ9cm*pRrz{?^O%w*kAOliUDW{gME)E5@vn3?*6 zyNrLk4+?DJd$GwHwBpi2Yr@r<>Hk8 zk}%=2{{?(t*dZ3yuBO)n^y+}-)@RtDHO ze}ukchr}+%q#1=6k@L4tF%;#gOKF{3)9jf=x;jjuY(kX{cvm5dIMsUgqFzg)grzJv z0SJC@>QQ<*%D7vRT#r5%$uahibFZxq$+p{?s$#nsw|%ob>oTexj&(r59;&EsUCwZ{ z=4!>_@sJp-!pWR&gZIN?>lHFH4PV%I2}dl<^jJ^aV_5iZ=H=gZeh)jms7Pj7A zwU}WOl~J+h+`u$_3K=aF1$lX%_2`w6Mo+m`hp7ib_8-`< z1d9bO&D%{L^uWQ8n!}7fTYE)L?^ww28Y#hsL=8PjSt&%=USNUP1&lC>zh-k;%oe6j zveww&hIs=eY>|W#4&cZc+(-^b=pmC1$L5*o%)RS`Qv?Do2ei*5+O0$xxfJMPxa60~ za9{}lFtSe)2KMZi=7RU5&<3I-od>0bkuIu&F3tStxZ-2eWV$yMbEL{%h6E+sfe5J&;b_LoG?U%a~R*HJn(s>>LQz~ zu!)IFpYr8+a4#OA=vciK&N8Gha8tivT_j<(KE_*r^v_Fh|6Kr7K}gXYvHg=VB)=9a zGB@0aXS!M#9C^P!Cr=~ zq2J*9Oyb4MP=r;^s)HjuCs3nT4{~o3B|-0hftlqeHxz)H8iGb$Yfus2*GFHpZQ)Wf zs2f4T2h+`*1+bXU+)56Czj|S7BA^VnRUIi_*s4k*Ba>Ct6yUkbHf0i3g|oC9Dv&J(3KuYSa0BN$!vYoxf26 zBcd5*c`%X|uq4X|68@TZYQgl^uVa6OpW{FXV<0@xAK<~!txWQ%fW*ZBIxWb4u*CKi z{E~fmyAhy^))k&yi&0Ak%=POXDZS}v4-EI4GmdD>;=;Ev;Pdojr9=?L%%W*M5XC&a!5g$^b!jli{e*_lakbw6LY*3i=4w%lkUGBrlQLKQ*EU?% z`;JxmZ9cm>eP_(Mv9T5U+lc3;hzr+u`vVFy`J-#Wl$PvdSbusT|NpyjU44|)f=fd z6h*cPg+a&r3Vu23+HXMBoGm#@3@YDlF#bZJ9avkLN9T>~p3C3c=YfGqY;4^mo%^w%$uZ zRm&%Evg!*rqo4ukKZ*=mz_XUVvh66En5QAFu$O$iRtzIbz3b4W>&n~+78^;=oLa99 zeQ(7hv`O?%umxZqD4^b3f47BW?ZNZphj2|lQ|h0VK2UrFlMQcowQ125%~Arz6QjbR z>KdsCjP#tLBOqeHt^2J~0U#p^%<5BbTUI{=tGobOcfuqe22(QJl7=|Z5d#HP#*bs= zih9R~SKw*1VTpRJvkF4JsMM{R+d*yd7)WBjqb-@k8XP!KX)Fy6b%aP&Jk=U z;5PJh&i`vLa_J$-aOxpQc7H>n@)Z2wp3;rr?&vN5%-IwSk5QcU8|Isa%@pl{% zLZ>TgloO2@QJv8>H}m#3zEvDg(;&#rVWPAAM=&l+b&_+NHuuih z4(TS|B@FWyvs0iR7;kM7Ow|~;iWSQc`Mr8Y|4jKEHnDKo2_}kzf?X_lNo3(sxSM>) zTp6e4Vs7usIzhcrr|UqdB;r(2i@+;*PLe9!@MN47O0f9qUmpQnlRovuBR2#bok)y3 z)f1@bdmE$r`Hml4VUDcbU-a-L%cU6f_gmW#{$eq|GCVMzK)V#S?0i-%i&CuB6@!HXBHq5@lDOa=y$ zYh$0Bb(sRbX*26SWD&oGL}LgY9sPo&)Mr#g{Wvw~eLIGgqEd)kHtplSuK~^4R`%*u8Q?ZTb zLN(KU?zizP{wH$pyLMCXoG_WALy)rO<;dJ;Zccx?)G2^Mxxo{sA@z)sEULD4`g 
zvHi(bYgq5*u;XDAM0ZWt`T=?&ASaM1!(yK)DExS}6gGK=7!XfY%*C+RI_YGz>O*HX{;K-h}-C>j7yCReHKgJ~`ZlCAxbKU6o5#aKwEr(GtuSh?(@H|VdTDaM*s3x@Z z`eKZXpqa0T053lPC~h_{en|}%i5Q8YXww67f^q1p^Zb4+?Qa50GUL>uA!T0_@8*qc z5d4D*L5k;fK9;oFa5*o$HTzb_NTrlp`*_>K}#WqolI5n z(>4?XUw%$k1$K)9CYQRVWx}=DA)j;fb8_&$5|J&#UgT~AnLx`72kcvz(l{QkeV@(q zDH_A_>l5!lJQ$=U`xYhz)b~JWksg##E@(Cv2sx(PVw7Nhw%_SN?F=}}-VEK|lG*5k z{GGOwOisG>idIuw@%}`Y4<-+8o4x)P2ll zJ7`^AJz^PC2y8a~Zc>*#bYI4XV> zI1V&EmfWEV)KQnsMM5oOh`(+1aBh^zj4nTPFw$QB&ohu4UB_(`Wi+DtH#MZziN2H9 z+{c@nfL0%Hf8o9$JZ(8YtC$tE%nzdQ$ z6GY2lLgiWbrWdgb=@=_=tT^KO7*tSQTtY_W`!R)W4#$}>*1Q<^2|vq2Lh^mtw@%Rd zBu^8uaNc_mQW|5jOpYo_@-NUsL{>3#wuIY0UZOrt;pi}h z6&vb*WESs6-#Oo+U(*MXhSkXw-|ieVIRTPZDRN7R=Q)B*ZUu{ZjqVK2vNv#9Lj!Xx5`!?T>-C^u+rA(Avlw!K~4bE41ov@8oyfgN?* zRveSXt|BFbJR{TUatUBwi_5A_!XNU$i@h+atR0*3JO>dc7Doj@Hr<>+^&u?Jrf%Z` zd&(8c?$Ioo&ePJdwg8JV*QyuxG^6nNixK>c8^rSkptX9T<@p~N?svra5m~W~mMm-F zSUF6pmGP`RW34fIxKyKGx&0@}eu2LbNCE#1Mn?d`af|=X&?iSiFy6c&s2L4ba2bcO ztwOEnqxG_2Ce4Na`E#kOT*()s!qC%|)K=ojvp+H#cO<&Me4CQunr6gHrUsq?n z$(Ndl{A)AQ+7FMm!*b}}@U<(4JsA*9c+VN8`(9Khb%&mxT2#8CA!LkphMe9-gaW@y1i===5AURs}51^vgziQ8@ctwqn0L$#=dopsoAkd7rJj8gRgcy<(rm3s0Q+;$>Jdi-k+FyI0ei?Oq3QFz z*P21gxa`>lqZ8@S7RuFj1xUuTIaQcx^j1+Ns{mhj8z#IUL{#*mP&D?{V2O|wRQIG3 zvWR^Nbesn%yD{iu&rYmStNrAwD62h=MRqnrlc}dFSJ2BUlDAMvq{$pFpXmY13CKKg zD-dnk+bET(aR&nyV9LR|+zMo&WAPEe28Fl-DFWoBLKtypRAW7o3++=~mJXw$!vHFI z6J~wS%Qp6J2rwzVG!wwJF-1y{&DJ$@r9KM53+6;6#nf|X-wm%F5#f&UYv16sQXBP< z9xIv}jDW6j&?%~%~io41t zGvd65#V~Ps^xS5ET^Bt(H0LWV=BzI07XE22TmjE@r$9i+>KARM2>7wuUXTv?K&ii# z0djIP%#t77p9fPX2Vpz3;W3G|WIv&NU(#fXnxUxUH@^RIU;3P_XzID= z7ruiuVmWAq@Ny!6{8>rXx3qxl))lMF(LeqZ1 z6e+*z-TaE#W?dxPR3{*zjG)An;^xmQqGK4T{vTfOPCsmio^$&(aRUaCZ}oeRiH70h zFr2udlQCnQ0A&}@dy@fKlMyP&llZlOg!ekdL%w=x7CZIn#|h;p7tRRm4_clNkT+%W#?H*aO*Rht5|6_6w7b`@JL z@hJ_`l=G8hYlq9%0(_i0?s2}0t_3CvjqRlLzM0NQU2o2Ks+00c=p*s;HuiMsi%tF^ zGX3ow253MB8pUOGRh6Po@$?woLCZFHNZHU8BSCk^3c@4m@DQ9&=5p*MVO$8|sm$)K zsVg6;pm!mVP$SHWzmdl@1e?%jWa}Nhtz2OCE z?^<5Rh-#YR((ASn$NOHA7fP*dbLm=b&MwUUuRq#Po!BrL?J;4?HuJlMokVkThhB_Y zXMGzR1Tl^aS=`&pwj;_>*YjLX19mnb!25rF_c+)6(pz36QD4Gb0Je#E;X+^In)fqb zk|F?8u(F$jL~Lh5#5<^E$^}We#CoHj0;>_nIA|zV*V->=id8Rs=?us~H)NzU{8~w- zgb+#{BAZfaA$U*iwsqdsb$|tv3LG|S9oQf-WfSp0*}tmc@jqNQC>b-pS>|Y_(*FTu zcut}J^H6avTe2F=ZNgI$J`a+o)1;SD#t+1%1bOZ}v8xso)Y82lNi>*{lcORC%Qn6o z4tAN?W=-{0rp>G2SZhTHSS2j$F4l{O+hgz9`%+eJo&Y0MUbuF#{rP-ahF(cml*7qb zs(NJ!wXNP)q$5(Di=+?k_D3{*X7QnZdJf=H0rtkllnZY86Q5k)_n@L>!%yMiK`p~6 z{<~MB!_Yc2S0L>08o!fXIA;2b&!iab9a!8*+pu)ZJ3;?uvKOqbVb zi25TGIrTxM#Y4iSn&E^wRw^kyMbDu8 zF2J;aM=ku6jTZT?FP+xN@m`x-V4yO$9^>Dj*I7Fs?s6S_$mNot;9xJ=Ym>CV z=v_cDR8O%}D-o}BsQo^MbY%d<sw+k~`er=9jg~IN4-dnVO@#P%y_N-3Ix9)iPO1_+@tIA5p2`f@d&tGtN~H)e zlgKb&9?94kxX}gG5bqYzi^vJ9>&!CA;em8!+?L_R|GflJ{1^{!kYs3>L~xs*5*G>Y zP!1dnDi{RdHaHBma#Yw+DMT8R`l~)vI)d3-&|nfL9GVuzdi#jyn_F`YMRCf3z>qo>ahaMF^=KV*GjCx4?E zA6;%Qmo6;ufbZFpZOaSj2Kv1eb-!wqa534!AUioB^f2stg-1Ep(e5Ad(vP_eHD`I& z1w@cb@ncpR`g*Apuw-?vyXGt#lsRM(pv0P*+uEsixh}c5c5<$5E$%Saza#q9gqdY6 zwH@4LaYuiv#*j)Z5~WMq8UKd021$co*kYz(FUQFzfJ1*U>jgy0v^sA zLU?p**7IQ7m|W(qnMSsc0tMqF(&qgcQcVy^M)@kt^}vq6J6N3l>pjgMrXvZm?Qz5a zX12E8!<+6o|3d5y%FdMUiuTZXC=Z^`!oGQAR#Ug^M<{IpYDaN-V=v35gQZ#1R3PP> zUguz%K^}vlRFoKFi^##)iJQ;Tfp5ho@>b|Tv!z31!b2){0VOIjVbh9{t*dE4du@ld zn10I@pDdX*Rq8KBpfRpkhiA_n;G}fPdN0azowyiQL9)T3?iy+xHjfTXJdG#ZPIkjc zNmb+GN=UGxU?Op#(aJVH~R-(z%i%mv1ZW4^hp z8tsU```(av?OJqr1GMeu`y=G@MG9m}So=dCY4g`9*sLET^ ziP)dx7y|e%U(nt7b6CGT?zSQV-SzrB9zo|ndmJi;=J7bXO_7e_yL^CjetgvWajhm? 
zVH`UaX}cZEdtcDp1lmElwef}a=%m8W*ZvMweAj2 zv1-&)a&EreOyhpu7{`&haIY{vV*T9L=vQv9L?fsoc=v%9nBY^p6fUc{Sj4D5PJ80; zs=P3C-9Xbz@zv)(@tF1=Qn%M3A&K;3EXPPQ%2-+*n?XwPVyVpMR{EIw!17$#qktR< z_0aKtYr=!(bgk_8iN%0MNZaSGWdHb@{fzPmJYcaRzyCuU93&x~HA(Be7+%9ee>Gbx zlX6FR3#tydwY(bFJCEGT1U{Gd*%*R|Znbb4dmKzRr(k9nd5JXL0WCL**eGJkC45pg z-$LY4DgrV+>vlrzVF8FifUa~RYzjhqa9~7pB)srA4%iHSIX`AfU2%bGuVD|@_10Tf z9b*}J9eP+1x-@F)w#YWY!Aq<0XLE6J3sGAm9HA!N(cp$c?*(h+#rmX}hM8HogdLs> zZLRIXxH^OzXXJfklaz@WZ>B9)M}Yo}>Yy8P?Rd+k7uqSyH5mMPTT7k_B{GEXlx@sA z-3!tuARHXP#pLuJ$;LL>-%03M zyR|-fE|rFZPnp-qUBK>1w&>wP0kNPz^A5T9OtjTH?Z!YKO{=O=Uj4o67j_ml`o&nx zhv2MUH|+{5p(oz6rVUfHKP4M~)h8#&-=|v?HbSGNaK&KjQmRr!7|5wi1phnCWOQ+h zz-*x-XYsFJz5PqLLVE&J+BF*&g$c6XU>+(r0tT90PYWXl`oP)t@Sv?ABeRsV*&idM zJk&MQRWT>n2M>)TxK;puZQumN?>*P6E_V(X;bmtI>RKC>U&G>!%Ov-z^)7wI!Z3lM zbc@uXwWZmYyb(_094yd==)OGp_3SFPO(h(JrZk9*h?YL=tuUn03>ZzP+bqnMc76{H zX$xFjl|SE@-Olf*{T)&^<73+$E49iO4G42^rhCl2cSw9t;GB6DiwjJvZv&Czw)uyG zidnz*t1)X$F{U5~O5$LWnj3Sm@%hL_LCFz`axz?ns1!msYT0vD3McUMl<}x*dox z_Wqs;(^SA#fn<6*(805d!+2Zp&h2Lfp&x zgCYRT+yKsU&y&e9WnmN6B?h+w!9E8Q5K9S4 z77|Tob7t@JW+i&@+epjfIi*^|w?3im4;RqL;EVUT34d_i^C2J6*Ym}4neCKI63~Yg zji*!jDNo#XDhX5@+b)@XUar9JXw>7RE1_>LsqQ$F58V?WZi;qJ z(C|Qw=2xj4nGPnZDd$4@^L3iXq~;Fl)mw#JEf`jHdUNu5GTISUEj1^Axb+v;#PXve z9-kEKYbR>{x0?&g9eC4}uA`FmhN}6sJI6(QaISpKT+V0~RBC+^h%hsPg}PXq%IFH+ z{_<5GS4e0pOc!YwBW57+O?!}D+PfezT5KARMq{NI1UGXAG@{ZtX^4}bL8&G*dnsuS z9O((uH6QM9V(Ei(?TG~c`Na=dvxCiI0;1b8i75Ios@f@E6yZYq9aGk5lDRmNiAVk&+gQNxMX49 z83lH>k+O%knZgsS+`K-=e1x1~h)-W2x2*Yz*pq73&<41D$9!zpAG`FYwefK(cbcba zSqVGeEh==U!kFR`u2nFL8oE(q4$Xre+w#)j5H#Llv?292Ya$}?LHcuIICk_IO~^I$ zLWhogg@_<~xyI$J7giLo9}+vm4}G$GVNaU2;;g`6^0Dn@+HuBf9`qj zrs0P_dt2`6b1e|dnSe#=3UmN==6@&2!zmUikV&x>ZpRe9teRcG4s`aPQ&AQiUNpU4ucC|@6ZWfYAZtM=|4* zoBz8#x&m<$U1?L&+ah^U7E$gLOY!$GmrpN5&_EMqbz*Vl3$aVU%*6l4kime)!T9wS z=+fmGwj!YaPN{<4DKZGVguL`gZKo_KT>dm2K4Veb=q|!*TsV7VA6{3C8zm`9|M@cn zARO%PWEmN}0vj-rw!7*McF8@Y=pqW5RS~$A(-rRPtJAv9i@2Cf{`}j28MU^Pv^Ozg za@AKIQ#kGfT8}<@O}^^Mh;bNk z)wAztCOj%J4|*fK8ik{O3+pke;^}U8a-J2h?rKf>fk~dfqY6)kZf_1LlkssQ@9``{ z>QC)HARMaY>8d1+Yw6}h)qEiIMJwo;-PjhKB@Ix84lOmZXd#hsb-GQA2a6cGj=6U3 z{&IM{tP3)Ny7l`CFbPIO9~z?>{6c7SQYKUI>r*WLCZWo`D0kSKd*sxkdKvBWe@!`S z-wwm%PErtGvd+#w67DljtMOf_x<%_j_pBtk655Z-A_D~(ZX$LLoReBBh2Tj$SCzYg zJOU`?gfRz`Il(hZckSF4=cMS8mP$m49k`CH$-eYu@%)ysHy6#E7K6{5Ueeb%MVWip z`6q_P@eU3ezn}Ww2d~2!9b2Byh9`>+vNFhNVsIQmow%quFQr>RL?kVCh8NB9eHIji zCv(}a838h145eSjblr^-Grl4C*D}}-|6Ut%LDu7o$1hsAulTuqvy!$jR<=IJy~qyq z9^41gLE<=HN?XJyn1tQB{|uTxOJz|Z91v^0!^49&^+6U`D5hk zfc~%FswL^_HH?TNq^~(y6jiLq!wtS*-)7Vg=85AhVme~$^PHzNqj&W3Pp@b2hHjlW zZRp^)%eItGPXJbwKY?^$VNF@NJ4-ijLrKDg&N8^;x9=|RFa%)(9550mLRH&CTkjwD z$JJYMrv|Y`ue0n?ZVaU*!HYMNt$_#k;Rtc&kC=izoYju3oJn@Qji_WCn@1YJ`*^;C z(6dcp3SQkD2jle{NEV(V6;!OS(Z;X>kV&=HYL>V?^d1YE{=;IDqX9Y53y93CVM4;> z)E(*8+1%RZ;U>GLDB!G>VPLP(IgJI(dAOM&?gMbUEAxU12USE#tiMjKd#+B2Ol zViaAIaVY&>qm!7&yfJ@K-3|a^xTtK77aQurv79i3I*js%q=^Bkv=NG!*Tm|P@|0Lg z!-{h>9|b;4Fme-UxKJrzJVw(oLv*a|fF9(hVYRYwN;`_c4k`3l@DU&8=C8~V2@v6r zY-u;~Hvo0YTtKX{s;VA8Utv3|2_gid9+U*$q+H`pfdSDnt}hPqnZI2>-t-M@D5bug zN4tgTU%wGj^BlpATA?Q3C`w(M>{Z5Z-rBNM73}|U zbXBGE;jd>8GZOrwHV0arJP3uw#1s6x!ikPjKOYlmlG-RN|a6%>1c=;EPA@k85EkDCgxfuNp9FKeeQ;lq8cCUF>M z?Bfp7`S3rTsw;JrOMoB0*H~@=e;TO7;>q(A5%evpJYH3`TKAbA>#wQ92*=Ebum0i- z{^qYe6sznX0r2rxfoJa0h*%8wT_EU>wz+Rjcj`Z)5eJ}B@G*Gcz-7~m5t3m%G;W2}Xt4K^(=tY%Jgj%n5lgFscTSb!RUm%J&Y#z<$-KszrS1}eyy z5sS!;-rz2rUtI#%OZ8L*EtAiHRV*1N=ik$VsR5kp$cB+)Xhq4$Uw6sE6q!;KHb_~o zO@rftxq~ZtN52O%$515Xf)W)zViPeS!I#pHeWIX#>$?l zg?vDu!8M+}6MCS+e7@h<$u8&I$Q)!)r|?90hNu{$_Vx`N*3>oH=S=W(nCObmQJz9W 
zJt};b@NqWiS;c{<>$bG}K}cFO)O2|(Z_?IFL0Xvm`%>NA@HGhrQiU67tQU_IIyCop zaRKqg0p+vKjs$*Fw~cwsA^}`r*x3k~0qinu^^&KG&B9mJ2^J$_ z;VorIHXjkyoV;699xAsC6>7cj;vfvdRrEQI3}4PPn9#;1j-&_sI@>aRCk!j8BR+w6 zTBn&!%>=um-?XsYl7#Rs(@x~lb~6F1TIr}QKMEZ;;(dS2!Eo}mk3 zsk(}e0GUCvE&laj#c!w+F3mJztD<)U62A_hxrWe)(2c0KT%Fv06`hPb0h^stCaBEw zSIZ18X%k+06nI`kB#J|E`S_Vw=QmoV@lEA^&7&coz2k~wMsQ*xjt!LIZ z)&@RP(E#gYwmmGFWX_XOmYzmaptM8pmAU$DD%s9xY9fJhJps>l8Wg(2P#3R zAq^jl+&uCAd5kCzA{t7K_-7|5k~5xSg=M~l)t0ThB04&gciiHlx()e_j0`NZLmGUd zPw96dT<2H@i7J{_&$h;XL3k3AtURUR-{UbPT&EheL!KL$Xs8e#P5|NnVLjx*C4{GZ z6j9m7TADE2)oN66dI>2bQgrC2)iMfN>2po&1>>_?emZmd)g=%+vS6^$02EKX z$uh-vhCh@l8_gh?{1tbToq%|EXQ0LtV_{DDT@S!{CdZeSe4rp+8Rhxg?g4|^%x&-a zWAdOB%ySfM1{+IkJobll4D<100Ufv!`{A#oTj@YqpKeCHGg=F4>)zn$h4|j5OQ>pO zP2Y@JNqiSJy8-ATJL;V`3FH|pQxK{c6J#|kcvlTM7Fp$TYAeWCw^WZA)HnRM7kF_Q zzI1GG?{kXoDe>h89UEUy>G?Ki2F=>-n|sSw!13dZ-mrwt9VuJn$h{p*buxORypnw+ z@^AC3O=th!?%-%NXZ8H9nR0^5zpFwy$`L3ULi5q6Tz8+Z&*)!+e38%{J_`l{&OB{< zr}JQHI{k!mV&HiU*_D{&@9MOKTkF=yacAg%RuGWrl1V0(-~??~izl{B z7dOt_R>3JfnsDs_7){{l8T-lJyKwT#d}@~XG+AMW-#XnrL~G1`J6PUNg=F>N*mus~ zX^_zl0S}o+v`mZZEDx2pAnQpQiKq}v=XiBgI=HhdcV-W#D7=KZb>bWhm*Fa-`i@2l zVu9k5n}|@44s%jSY+nfhphi&Jt&RAk-@fIiwTMHofJ{0_<-`R9TSG`#5z|h>Z2Dsv zxZ=^Mf#m}M_}tYL^iMJLi?<;7rkXSSj7f?%4=B=yjTCun82r<%O>axC8Jwa zSXDJ4eaTF$Swsw}oWt=Z=d_MC#^&4=6ii@G;@|)KG$_wPlrj&~6B7oqOzN1G2|ZXqJ5Jt591PvS6dH;=t&+uGgbrdEvGn$9ph>Q~HU zKDLHH0tizU#Q8IgXo{qovprzEYgLLR@*(w?;Wj$-UT7rVJX6r-YuAa(5d~#u>^aC+ z&5Ek4c{1x0;=LJ4Pls$b=?&N*`{Mc5Xi6HeVVxYQAm1V%_68U&?=zbA5vIqtBa20#fC z|1S(7&GPh3?K$@$-Jsoh5sK?1B8=OZm`ZhVW=`;Aa^)W-o2oL%$eKf^?%+;ia-77V zryvIs&9Kk~*>iR=1@9IvbVFFoE8uE7?YF$KHgDI!+8m9wXR&zTV90rV9%je71B9*; zLJOIWa}8>fD~Q2%e zGs}-;{|!x|={BENO&WLMgWy{AjtM#X2C$2G_brjfb|}&YB}fcrx80o@7XuV=jutJDGjt(G4dby_#B+)yFvzOO)j#6JjV<= zh}SJNagLG)A2Qo;vrb2!eyaex)*WyU2NU_D4&41Qcu7o=ynCdkD0X8_L#stnCkLY)WVw7tizh`LkLK+z zCRyC84Qq8~P8D3f7_D`LH@AKieeow9kr5=ez3eG&IiB;LevCQa8h}?W>R=M_66QjZ?j6 z=WMIN`5g3|)*<#k>yh?C2O_SBvh>Cto8HpxeK3PST2Q{#<;8?71y5dda$yJB`C?T(DHb9sZTEPA4Sshx$!73dSmORAsIWyS17@AY?V7J1!(WTh#N+>_z>XZ zoZ+v7oVSvq{Rr3q%yw0P0lxu5d{kb_8HYX7SVI(+@wjz1`GKj)Et)+*h?hx-3MU0d z2jAfdRAbC>pSCviWWt(v>z{G64(xmt711G->A~dwNm7X$3YoIri*xBla#mo}Ud&@! 
zU1I|CdUV1B3k?l6`WA_#uCMCQxjteEh_Xj)lGo^pMbH~?0^Bz(%?uXCJL*M)YnJI2_&ujhSA865&X&*+XebhM;R!@P12Da$U15(KUU(KgBY%vJ0arDlBDCPIa%6OO};jQMm*>_F2RGW-9yFdD_ijDRv} zZE6eHhr31az}b7o%f^P+)po6#18Aqn?9BIw$Z-X|fi>{n__4!xFRY=g|3WL){SmIM z)~om;jT7`J9}&c6WB(B-^i+4#i?hydXWN3r|4Otah%^ zX_|~^ILl3h@%oLr8x$`;T}v9+}3R zAK5Fb{Yb6|^0h*w-Y!~?DQi!4`nd8eTy5<0zx<1KjXP+uh?IZ%ZWQ&M2A;d5w%dyA ze8cLgBiZEnC>UMq=oINGgsz)}w(~=XumIe~ZyGEJGhpO@eSri7lF24^!xZUKUZBYC zMt0g%g-{3FqESdu6cvkBm(zqk`4HjiFjXWzgSEwishH)t^>TgefX(7*DiBV}N z$)RF{ta#A((P}CVB<}~~b32}$QswkOa-bj%`o08ZexQB8b$6eO8RAxC1e+Ha0SYMt z=6R)!J||t;ygY*enxAWMyek`0r9&rwvLQ6(!v^4X^U#Zk-)h zB(baQcP}#4f#cDE6QRWh)efE>1YwvJTbrMmxWRTUOXuvOpQw6GhLa;mm$OY7-{>tFXZjlP`;bN0-nf2oV60&fTumL8X^ETntqF2^GP*mz{kA^(Mp;YUIv!NhYp z?(U1~D;oGF?;vvX@wBpK>J39o0aV z!uoh>AwqU}Wv3eGX8SvW}1$oA` zHKwaX%y?1~z>;+xE~2Ivk%+SAqI*tZ*{8z2Mlmle-rn#O5xs8g8eR~qui|aFyjst<7*0c93JT$fSQV zeIso+=49NI7lHIjF*65lqENB9WNt#t^3ZeY z(>o;|#eK-^IqLu%B zhnrMbNjEC(8`IPTHf>U{H_qZp$o`)(8NEM54Qo6Z$j~VIpLe=c zv^RnAH|eg8O%Z8L+KJU9z!|a_E*7rhZ{$rnjlT^JoSoFSk{@uhHN8goYGxGC+zP74 zXMqBNL&TGp*)Z9hkbH?%7$)f;2S@b5eB6DgwXA!mw@^tYA`25}^Aj6XJI~>v3@37oYJG6C91c%g$qo@5E^NYa> zMYdP?l6Vw|H)jK>kmIywaHXb|0BUej^Xd3VMt+B@3aD5sP-d^wj94+X#|1;qiXl~{IYw;}c-?5kZcwvoBX+JoA`=u`|&&Yr1^-A=R z|HIP%+#bi`9M^v!r?$LMJvv|tkgp)8w!BZ}w?+IzKXG61JDgLNvPhrEDeW`bn{B4x z^dfB;qZBFnSFaW2!CF9~ZQl>us|6+Q1(=p6 zAV&EApUiAL31^uz`}cv}`wqX5PSEV;xM5)=jb;|N8yPusb|5=FEMgq1#~1iHhvfmr ze@!A>2U&Glvy~v1rmrOja6bkw(e!cMcZS)MLK+6yi`3csB|H+fz>Pjt0BBsKAUjxv za!v|ep)Jv+_t)P)=FA6P8&3Ab|L{~KM_P#$-sCAFk7yXzh1q}_z(q9W&nz`E(R>q+ zN0BU9#b9{8#Ga$ZSy0GG$U*}y;eNCnor7}Xa{m2K@C==vwUADP zTM*xk(<0we4ve$md{@~on84pE%3ImAjk8Ajx>Qv?p5?0<{XF4nY7-Zb#FGAAoc8lcclU?O1{WCh_3 z=!kz@bIo&~G|{S9u>8>W*a}kH#;FICMboyXeRDVx8iwB!TlLDujgz> zF*F^{J+I8|eVN7m1Dpu%sGBY&W7cWad^q$vCVBbS%wc{z%MqZ$+yL|W55rh834{0`^Bl5lLgi6~Y$ zfTo5;|NC7W&Y}toFAhTRW!CS}b$vm}Kxd{||4X7pfvIC*IxpMH#1j=dryl$9t85Q8 z{fA%5G!+qF)KvfhzIMZdSrHiiUtzur7DRk;&{hKs;qNwI`uPAFkb%Yh>9`V4hCa>s zi|cAUH@pas%LkwtAfS{>h#^7pwMp;Jm=xI~y8$dTYFm;MYOb}=3qF9ybP(YCvm!Q_ z!kh|!_##P-88J#5&VdYm42kiUab@v~zfKBTenam-oGAmi%UHyJi+#hb04^j6=crbY z^4UGu3w9Z|zA9AME^+wmr8wWpqtb+rhW6V#))WMNrOj>0D|@I@`3%a{B07yFpg z`f>&_qP=`+y~xrIhbafW?}vu5#WL}Ce>82E44ZNf~oo3U*HQ_x5t#lKNP?>M3|nJ|FnAl z!-!RNm2x~9??t2X-GsCPnOfTRG0OXM3f81UnX)avcBK9Vm|nQ=>%f^Cdo2^1nw4#=wn)3VrXvN5qm+B)M^3fw%W zw=C+_P==<<@3jJrq3tLT@`%^G*zjeQA+aPgBMolnwbbz~180C3PCl`u=Q#2|E#qB< z;_CKWk)ZaLK1W3lq(^moEpD4lHY?lj@fAz%Va972+c2tIRe%sO>I3;(ek+(!h?QH0s(3%_lR1L--Q|cewBcJ z(5rSzzz)!>x{ah)WR-Ntdwgfi?f-QI`V6GOKQ(t^9E^_A+ruL!j&(9M?E;vyv=7>a zdF$uDZVeuuEx0Ykln^x=Wosr1h6ehVeY zEtib{b$e}H0bZ4Ml{T)4P6Rd^bIE016-NnxSO@lxIFR-st&%2=@E9dR!TZ!cy9mduSIcAqANan+#SFiTr?bo>l&BrPfa}=<+ZHHGn59Le~Z8prC!M#y+uitfo zS+x-iV5mC{|`% zJNeb+1kCYzv+FM2UQa;eVfRWw)mTYM}zlSL~YguGi^`6SeXRTR};p`C~bvu1a7%!b54z90^oq%SDt$iPE}>z ziy@K)0X^)SnsAz6ux!2naWuv(^hp1}3cp25+F+oaT3Eym%CC1@Eiz4VejS$%ySF6n$#s;DsO)%02!;_NciDm`mr6OrEC_OOIKa(+kxB?;LeH0uN$D zC~#r?{K;PP0&#Flm&@HSrSKT)o@SKI zoT)U7%uy&br${WPjEu$8!oG7GBnVIYYxr9?tDRnGv_#O+s)3`~Qn}7lp|_CbihJ`^ z?`Y2f=ugOBjH@TGsP>EW!%!QzzxSJ%e}q=BQLLFu+$7jE5r)yo9k%spjZCsH8%W{8 zrKu7i#}0uff~y&McLjr(s~+yx3@|lar6+e;poFC&t^^ZOlB-R@wRT0XwgND}DRn=n zy3CLR!7T8eC#LIqi<^wk3USWcNQDRvrH?l?!SV6Zjf`wgiIs9Qg0eekWW0_Nav~V| zka7&ysak$!FW%XbyJb1hccIOe3R=iwMTZ5W0QfMz=S}G)EBtYdd4$2c$2^z$Ohzw;kPTR&#^N?RaG=o^0?S4>HSttcJ(zQ5isXUt)_dHa>#mN( zp?xxMTj-$2#K)a2_WGqDdfX>l+y)zsU&+_!3YHNCf^Ja&u=g^|h-&|!y8QV0@1~Gr zK;Z5;GhO;~w8f=+l$ZQS=;UVePA?pTf89yyz|567BW=eUEM)>@^ik`IDk^3M0)NZ3 z=446XEpxzkI?Sh~6O)E3E`!g0%!3jo+#P!vn$|zprZ&FcE>~R4+eiJx=t*Lg@RFvv 
zkeACddoCKHb!eZEJe*^{+ru($l&CdXO(GfazrV{z@r5JAa3DK|!y@aKIiW%+zqp*6 zdrv!GIq6^%*JTip)_J_vio(1KxR!;Sq)+;KMtB)$&6ftK{`}H^^9HQ$AYnwH!qv2^ z@+8ZPXI8P+htWekj(aXOsm2MFc;M$VaFl77o;!<%Tj@7I#hM#pPI%DJ(<(z%$32Tn zkzFl_y%DknH(dHeT+{HsSDcW2(f;Y4xPf$y=~O+Gp9+#ygks}{!5hZquiN&x_Lb-* zose) z8`*-6$U4H8+kUh0R%A}z{T%`!+iKfne2aRs5GrMaKYY$#Kd84a2miMo@{FttH-{=q z(CU0nI&t%0^{A=RmuEEAA8zaBuuFDVOEB}%VD5dJW(~@}R$t(Ee)-EC-SK#+^Rv_- zeHao@6DN+JLJNAS=8@JV6TtRhJnAoI2E#ECvl4s(^LF|ig;W(Ps|uq zs0Lc3|5XSHlGr|FsJ8-YCd=L)o|4($J7MiK;}o8gm$21qY?mPe++DyD+_r?JTFNB< z!DVTLwe;B+(LCuK+Mv~_UPcMJwTlB|0AUWM_KHFol|wG>9nWU9W?lq)XJ_yTq+D`f z>A>MFZwv^9{xB#JjC~K=Oi|_j+1AGIiZDdhIzlyTSYIUK+K10}k`syQw%9%3g-l9ICUr0Cm90m26th_7#6gVn{W>V^e8#=8fw zi@EvD&q68zqoA@xLd?e^&2yUN5sQ*G`K%`*)4fBdz)@J)EY72F|BZeGUuIdU$e0^` zz~+zEB;acd{WkGWhr-9-Zs(OW|FIn4m(4?f#+(dADrnjFt+1)8|J&Ff&Q^hA22gAq zp<)D%?+B`6ipkH>f3qCwna=VE9K2FIc2OkY0`tV0CZO4)@Gp}VGuI$`qRk>ZlaVJr z2jb`}yQ23NN=%=-zz-XMx`(b|bL`Pg1$0O{eO3i|oEckvJM5SEEqw4=i$f)7o-0 zi)W%${{1Kg&50S%(9^tb%a5FD+HC2|Ncl0K0O0fppJV|}^oPSvR>4AVY+N}OmRym; zyDKg*B|s4G#J0#wM%oc$Tgwbxe{3@|KNkKt)T#dgWK3!l3_|E%_0`^zci-Xm$9Wtj z}@cu=lAL6EA27ZM;%C^~?uiKWjMT%wx1&J9@mSyR&hiSQc4p z4E8bNWC%t~Bec*q=GkTZT+V(=-a2}AyoZ*?q>R}fLDQeZm3eb#Ifu!RDxue~b`&*F z|5&{d!ucyBcLf}YU$Inl?{=(`8``|y%t7%G%e;zC^3diKCvn$6-r`lciGr!by5^WB zr!{y!wd8MqNtX9~VNz|0$SU7!Saw7b*rimNU*5G^iVm3wDQZzUMfG^_|;n`64rPLT74?t>;k+JHV6<(97g_=uGxf z$I#{zuvBa?|NRwWdrfP8Drx^v z-P^G(hd5TmU|n$pP2&U#BdM|KWO~#|q%c2OY0sj=nw-yc!;WF_4uANGOM0@mfKpFG ztIPsH`x4Y{L?)R@>DB787|##@G~b^bPyid%32qoB@cNeh&IEJv^^bM&1vbKp zA(p?+c|8h0^@UhRzQ6F7-J7TP#AqCSZ}z4fJ8^lu%zBzL~)(v@S+Dgcmacr}o7uXTBxK$1z{#(re5an!HgI!9aOV2lIs zJrxc}o2>k(VHQ~88N;B384ytvy*8e}1;tl*^Yd05?-np>(*Ez_Pw#wQ#R#E1`F=Im z0P{@7S%92)`vqw#4`DX~C`gV@^P{OsyF-mh&?Dv}U+It_ukI|7d(3brru;9Los)DU zG&x9eEsGb@nCw%SMD{>07t}XE;S9cBVgJg17syYW^dvqp!l8H^RKc=Oj{Z>F1`6%a z^o5q#2zbQ)Zd}>li1Dq;6=5jYW{dT?8NJN$NzZxQHC7B2Jscv7TSrJ4#082%CJUNI zZ&(rd_eA^=dM3#J+hd-FiUo*u$T*@(Ux;WHb8Ev{=ZmkxbyD$#;aISRO&X?!E4`hUpz?Wdpg^#Xg z0bU!o2nLeE0eWusc3HdEvmF=>5 znvsE@l~sE*#QkaW2thT}|56?B0+noBB0W%-)cFH+6<`HXP5lcZ@<=WGiG;@jpZ?Py zxC{!Ws8PElaq;EQWa3xhW-(Ncz3BwOnoOkq2QD7g|^a%}ri2LuV(^b zhD{pN;+{ewT<;a!!+QKw2Q!lsa`&cvjuX#F zIOrM4%H09lwULRhU6-!ahIv3PxR+E+9XokVh3N>xxd$YE5lVW!jP!7U*QUuP5aOgV z-Pa3*vIYcF+M9bu4T+FzEf6Laxb_HUGqU)d)aJ1%1t}dp~`D*`7eLIEw^E z_#-hfjBL$%gh}2M)QFWu0^_wwlBbXIjbSguT!7);d6Ha)L7D3@_cK0x{+nuV^06lm zt&#S5zSU@I>Lu2ScLW6~lzbCALK+4f)}(iK;uo2BcEzX+G#jDo9e;CBY$$w3gydGfWsezux$-`;7N((rTsVYHKQ)DV619+xmxx zUY4Nf2LCQpN3|-JWqhaZj)&`L5vrZ1Py0ny0ROjE?8{^)Z&~(>34U%0A9K0Y?xu%r z5JcH#X#rmHUpb8as#v3J)YBt;5DTkO`s*orjQh*Yinl#)oFIgPy9THsD2JHZV zks8=DPnOsVXF~{%h#pXp6APKI{@y6&vbD|-#|Iu3(0_-75sz36nl%U!z6WXH~o>2HSj}* z3QI=Vdr<~w@1jb6DI`NZCB$``rQ0noEDUD-lM&`@gOFjLLQc*-dKRI7x^M^gD495| z4!IX?k`A4z_N3%m=mPh)xzZ7qU1nE|I?Cn5HUDYD#ErVX40&c5{%E;zVDmbb&XLvy zymU_9!1-;~3dNW_6}39(6g+|ZGem2%K*Zc>;272lo^}a?aTZ=)0QP}YVOVkHdB44L z3D6fYxB)hRDzm4MnBY%v9Z>~_iII%MQfvyD86+73H%uW55(22ho)b*bvK@C7QA%8h z&^vXl?H@}{@+yyqXW)B8sLx)z3kYL{sOT)#!H#&Q609EXSj)IckleJt|6cYz! 
zJ>`#BR9RhPqGEukH_BUj_~~n`%7l$$Q0r#tMVAPi=aDvfjq=(1=Vl_TESLZ3xp=ya zPBLRH%QYTdY;g5ynt*QD_~e`Vl7;pDD6pfE91)YC4x;2^`N%b1O$k7ERq~4;Za~Wi zwy@XxOFOy z;&#L5Sy(sD+GltT#Tb;853N!qOL^3n{eZTa|5IF}zD|ggV5#AK?v;TU0N&FllQaSQ z(TUvvv8%a54Tc_tIc|{pZ))Q<;MWWUJSw(X5X&g-b%VIkr^}>}wR?@h{V71x+9371 zvXxNC(SyYG`EoVkA3MMTuG(o#KcWXYQ1H{HwdCgGwrCq)7_=duzYo;T-k3*o76v}C zfd$HOvu?P6B>7>tQ}5OG&E^nu^oSD`8{LRkRtdob@KExOkTCkl573&ZHAWlAZQ^hA zn+lRA!xT1eO5>;DB+nZsZh|Eg?thaI@lcpyc^VpCucY`7V;Ow@U=#m+;Hj86_%#{C ze`S!Fk;2Tlkya8sxO^!tBW*Xxn+UlB9;=9bh#*P%yxNl`GH^(JO+h>V2ZPZVrQoyV z5&<+jx!!RM#GWsdJTu2$2`$ZkEnC*xtzo%eSeNv3z~LJ=dWWZ|wbu1sK>Q;INo>wH ze(&ZxwA3MvSU&3~qM>9ZPB)@yn7$V)D06{Nsgr@B9*0h3$S7Dd#YE#p#wIVFV4IEM zV+>WRun|TExC5C5W+&k5FvqqiQR{DE8;dc-%WLB0$#lIq_&hdkb229GZA(mNHyWGQExCD17(u&LUhFy!)YyrxSQsR??MY=a`I(+*`h*E&||9LhVZOG@9GKKZROFar>izEhq=6L zrO`mZXc`4yu1X3{2`MGL-MiR~R_!mh0MzqHwjGIiKU0GXw{DO2k0pd+zf( z=X{8t)n^CoD4?d(i8i4Mh%00X6yc^Fe3{5+8wSfO;XXo1*AFTEO{@8tv;kt>V6unC zP#x}UxYar|nBOvnjWCQh$oYKRKmQ;KTcc(Ei4dC|XY3;tAaNlqmV)(|hA`nzMldU#wHXI}M1U~1IGgG(8XzT3dAAkhW(*Q%?~ScL0|$Oo zN)0&kI8S%ecs$Oi9roE-E~DIhh#7EE2q-7yCqCQ>q!cfs#?PpryKUunF7GBbuv%?R12&dPHke|2HI*ZnHti07nj$$I z8M-hox>)HvaNoo;-$oO~@+~K%>}>fzQ*3-URsiz~IBJiM_C$l99+?;jLfLMQI>PrV z*V-%V3<&H?klu8AY7EccJ#A;gpeMJ&c{+{b3Z6(tE6VenrRNHNTu<_gp85-5Pgg9E zJtEPX8r4@vV;z=dO;nK#Ru46zyLwz82#CRo^$XzTZlKy7|9SGR{+r6a4rp=~A6&Y7 zf67i-YFkhh<^ZSIrW0)V@Wr~JM4ZE!p4x5x3Wb4upnKof*ec@>@_wCZRmw_gUlZJT zG{KQRPhf2>J;`?>{~<7sf{;{vhLv*7q3{kexsP!Jtr@fd(8KNIrm+$Hi^~BWN|eg(A`)Zz_fSG z+(_AN_S^t%FsY)5xxKgBT0P0hp4xAg8epq8e#156tW@wM|FP3zw2r*=T)0BBAK+-aJ1dv$?5PEoA#-| zG+m7WDBul%^a$Sf-F;I+NlZ@e&U zABA&d(U6h!Mqv+v=Qp4lXSf`{!BFwuGAA$9Vtd6-^T5XxQcOMK@E}{Amh8XJcC&C1 z4Yv$LX_VX9w)lpNz{T{g(5%E#nDP0G*w+k%A#~M;3=3Fuzt$MC0eN@g&g>*+^lVZF z2=oV+t!!{P>3N*GYXbtGdv;a41ZlG-A8EcIN{S(B`5?RwJqQl8wd$)f! 
zhwy>3EmHMjiEP#a%@k&1+gxR*d^2E0O$=yAMNooP(^b%`b;5N{om@+%(hgu<;)svU zWtyHKdGQ$Q!>c+mza<;-XE4Z8M=O zf61POo`S9zLYDS3vP$9(k=N3Q*&8+28Vu2<#aCUa*zve69u0DCkD~dZ8)rV63x_tm zG0t=5qpC)^jVsxSXuI21^g=7oY*2NzYGI4j#{R;v&J%H@Zb(eqq5>$lF=gIkdbjR4 z6du5Zn%mLPK@_Hcc$g7NcY_Y-h5!Km?cVr)NC^q=zbR#NB=?7ftRCWcH9c)7!X5{c zFJ>wQCtN=OoJ(RSQUUsJEM+S|Y+4Yn&+Y+Ji0QlIV^!Hnz5!Q--971vg2pj};hSXQ zpxG6&um+Hd%%Wq8iaKu9s2Ftu6CYs!AU9#7zj%V^TQNisgj)*JQ4j;M(I-!cJba!I zumAzSagH7x2z8M>s4TD>!|bZFV1fpCTviZ79)N~7A;sQrgULlL%=8tHY<8-5u&8vH zeuWSQgN>(j<4AW6^rp*PWv#MQYdnNa_1Fd3&B8Q;u{VM83pzY=U>D(UiHq16&+*zQ zZVEE<&-2@dXm=n%--Oj|UhX7Wfl@nzd1 z%BQX1oK?^OGd6EyaNz^ic)77D=mpGtmL$Y>PhuX{n?eOjMp!*_!rHi_-NY1a3@eSg zxR({`ILUd&bay#_#8W16&?uzP7exb?`SFYZFf7=;-9cE$*)tb1j@^@7V1PRsh!kPw z23`Kge?+A^cJ|bnNf4w}fB+Sbc=pV&K*B%;twFmQdH^w)Lx~aU6YT;WseU3pIP#IK zI5SRRSd0Qt;*h<7m$1u30J;hLp#TMWf%hy)pqg~!3wrf5ut}1ZIYG_!Ag1t&66}AK zBUZnE*s=~o6*m^>%tV#B(xVy~Z)4!u?)JoK54)g;o>;Q9F|=)Q2tjK2b}4C^1Z-_z z?Dd~(R~cXYHFXnVS-R=K)~QHtsxb?*9o%owaNPW)ussa%hYd#-YZN}ASRL*9Xlma= z3j79`+|YaRt58Oi|1?v|IRyxUiUi!j96?`XP~a?DK16X^|J;DJezNWEvoyMGmqu1@ zGC_!Te{31dQ~+jjNSbOuC2{Y!bs+^{NIdwax8~@pdz$kwyH1G=VzOivlbha7^)7ei zO)SylOex6^p10R0fuYAdW4s~q3>TxXF|9WenFT_8h{!I<8GR=a`UJjU@Ww{_`1`Y6 z4-i~t8;x{QBI43KWgiyI(hC8m;hWk|{cV}c(80^mEhcVqKM(+MgfM?|6##}+KX3t< z9kf(k-w+f`W&aH^`iazDO=gbIwol@~>cE@U;PRuqoBI4}-SymSht>WuNPH;vab|@` z@V14CH&BiU8PS&o>b{)KcjbiIL|+akQvN_EEo{Y(QZ*#t-=JDur~`a2%80^$Y5jYA z#--!ecu?Bg3*BJCv4#<~!#Uf7B61=XjQsOegn0O|FF1lMi@|nhj$aAmT%upzp>f;k z=A)x4rRRV2^yOJ!(axfVqgV~_gX4sAjbzK$!M*YhG7*-%5anKWSU|$xXnOMU$?^-u z8%6>r*&M5)NBevG+!6ENN2re8rh*(eB5h)3hyKF?lhYBO@MEOo)W>7XQ`bu6CjbC= zVmMp{3J&$1+r+6wd4VM5NaEv-DhtiBNB1t7$N} zqR1;b1_^IZzobhd4(zYOY6#y`!MCSj*N-ZOSHiZEUD~%>OOnZ-Rc2IVAF_PySq7rR z(;k=&sH-+)x!q#T140gn*YjO{!2e^ltLT!!*a{fKGGC3#dK;eIFnjw8=swpS7>`xw z((caDz4!pKhVd}bwxdi=w%mcEJV}A|b$fkJw-x|jvkva`p%XF7yiYM$>aFTR#^hjd zQ{2Jt{yAJZgUG$R141V0;>rZUPFVW{*Yky0>hQ~*wb!1PVa9&y2`5$%t)7f#E9ZzK zQ6c?K1TOT}91;o^r40_;X0n<<_o$?7`y21`d1RA8l3g&Jq&V}1HHG@Hk~qG zYA40q9PWu@7rtXq3Ixoc*AFuEo`hzhZdkWz!x;o*%pib1z5(4T!FCUQb%mN&4|GP; z23%`7)yKDVXG(hyC-xJk97iaMV<_}CPy|ga*z6@u%}Dj4CdXcmvgDs8z!Kq~L?U`WDy(I=Te_1k`M z)Q9CEgpX5I_+{W^8P2P1QBxMI96TrMN}RNSC-4}L$nkRstZODpMfg?|TV3!E5aPT?OK;dCe?dz=EuRLSPEXZ2?RL0^@)p z&b5EFNU%_uO4})=dW|)y7Y%mKYhz4Gi2KY|Yl-DwJ;fQ!$QPRG`TZUQrzgWNo%b~r zAEYEK@0>?Dw{jFLq``FXmT78bFYK8hhop`)lYPhd}l_g-^uB7aX-|n{f z>sYoLHcdU$ESWz%qTb!{7&%yF7R5rg2Q^XWQ~`xF5|}bs_EPDy>HYCoiq1-> z)lF^63-G2iBu}BKY_bqkdJwJ~=CGL9*kZnVp`iB6j3nsg8Q~RYIQ*2l)a9>fPIAqs zz_wi2WJ~AtV6Agr9K+u(a-Ev zz@WDG2qm=YiVf7-M{b>Af~9#%cFrZ2DtOLQGwwVwE%hI@?M{UHAH_q#^iPoc1GPnF zLmzW*WTPiN?XM4}4wD$HhHOi!4AO1+#*STmxm-={e&Yat1mGhIrj)%}J6kQF0=|n9 zwTYGCiq)lIm1`}}-k8co^MUY*-KbPY%fcizgn--ZJ)_0kA})^^u;)j|5QiV;Q6I?9GWv_6e8|ly?XpNp{qt8r za2%4)lG#Xm7Zh9mznEcJSeW{ppIVFzuIGb@sY%!>RF)1FrS_d2FxU1S=HnwFn=6Q0 zB!D4RO|X0GML0j+Y^g3y)0|whG-9_3gRm*2f@&O?XxDFX)Q;~3K1Df#I-=zbq5J)* z`kkI%C&+&>n|vf6ly^WXHqWrPXh-7WZJttI3uZM%Rfl;Z1wgUG@ae;`LzjNDqgF$X zbnORuSak~uWstrx(!vU;j2pcjr!rE>Vk;U~!PS)z4Gt)Lj|2B8 z&lyHoSbl5%7^lFz%3ZWU*J@VcgP-J7fgvgvLNnmXo501QVZ56?JhRC|x7E|$geFdcSZqS8@sJDUgGBR>n9U07SFycOnEceD)h zmuI!8JV)FkNT{czA$59~KI|}#XA_;pSrNyarU1ckuvQ0$?AMeVn~AA{-vnv>03xF| zW~higO&Vzw%5>k#?eamqMVs~#nLUxVYR*ILdwFwim&IH0-Lw6=T2&ffk9uDMWs_eh zzTO#R#cat-gSQnkH=7~*S5c{mk-zZmEBB8m?{fAm>@ECZ>NDrJnFnGDO>N05OR)0a zSnc5NVu4l#mBYh-+1USox1W$?S#==(=Pf`d@0s#q*wym2b!GrlQsRvg(@g+0L)Dxa z^Nab1`d>%BFVuGsqW(J-#gI}9I3~IHx9Tc+Z5=Rge}c3g%eD*c_Rsqq#FKh_X5pZg z=gYkh#9_by#T%Dw_1{E#NO&a^ZqJsSg7JfSuEY7zgVQWdjc9F9iuUyLVphU(_|grm zHuV6Mj@Dr>*yZ7DfS5j4-2k-OG4toURWx5^ks$*;TyzFM0Tw3qe>3No1|Xp+-i{|d 
zy|Zprw|hW81elP}H9*wMpzW&&D8UL!#Mi&{mS-XO+`+Q2OFya_i;_cNof4^6 z2E{BRs)Oc1;nxholYnQoKq*~C~ zFVKZxkog$e67*y^Bxz;j2QHx>!EIL**B$I z?Taw%dyd~*T*O1-ZNpXJEhgW;nBzr?q@N{Ks`yZoV&!1yHCNb^P|VlK~KQ){a#C1=gRWm@>RFUZRMu} zMLAjknqoaHcyH(WNeZxN&X;6-+)%PSv?v-IL07T;YnE8VZpaY#NKY}?X8M*!qzk5r z9ejrSq6sWkf8m$!&P!MF{8$6&y7daDV-9(F&%Z@bH?@5mDtv{o7DJH&s%fdpd#4ym z+8Iw$TMQ7;yT$-*#KFW?RTKKDFF02ozDLL)rN~?G+OQhn57FSzdoLMA{hzGav!tvE z0@R2pNd@Mf=8=!Q6t-?3S ztW1G3z$}iE7F{sk7jN?^cIl&~4C}!a9Ml3iNp6pS`i-GXJfA{lXp_~Z5MH67sLEKM z6^N&jUO{|U(BjRX!N3#;|6n4X*=vAvMeTf-O+BTIl{s?&apWr~ldld%$k2T|*gYfK zI$+M{3I*0RBGe_-E@9{w3_xDyL3-K%A7nry z5aHHbhw*$)L8I)EAz{C2+yAHhQ>H$ASycscCi&%d`}^Clu|?+B1(Ok=-3%0GVENq> z!frY+na!Fu!GbQ_)m7 zoPY=CVQcOORjwT{cXpd;sj9|hSFwS4CeY%)?Fd)~K}^{^Fa-$Gx$J;hSa0P&kZ1lR zhUZe8j{YI&aibNnNFzQQpFoB@+qBe2JZ+fv){$YB;ALBh|G~#4)x5sHVw}aWn0uaWtl!>lM<&3qO zP+RbB9R35KHto{=p=M6d>2mzdKAaqhZ`*;lD#?_`(0WRkZ(#|vN;ZGvI~o1DqNr+{ z0K&k4m!u%``~qr1U#)QFA0j{pwn7)YW+nUhAnJ8Eh{nxHq^3QlzEUbAZfG)n3>z6FKKR`F)vWyQk38q%c}ju zfzOdYFCuAfqjs`LLw1MjSR&i?ad>Z;2mLkc-z*-F5ES&1&|hL(B8{abQ6MVtWZX~H zxW2cJW|1`1=!ipxm-$8nexO>o|k<2E^N0mAh-D^wJr z5aSiCR;w|MD8a)X+jMa&R~`K13!`bgj4WS$u~2>h9>TsB#bs9PD&k<9no?6ToRu7D z{{#Mii;FiX+p&oSN}0RW2Kvu;KD1`Pru5CvB7z8TESQKVQ_rvt@+?Y`{f#wun~~WO zlRmU2xH~rzO0O(cKyRM4Yl$4~8+gvX>%z)GrECkv>s1y&)xc47nXp9J4ce-TT3T&) z@lL#o|5Qrt=^r9KSt^!7hOw8S zV79LFmG8wmA5UPtd`gB*(7qz{wG}O1qS%c}af&pt49%dk^cu^9!E}fpDx!h5Eds9ibDRjGaYo%m|*$im3#dRI9s!i=; z3l+eW-X$2@WezES(6AqTL;iu)fh z=meFAW-W^~u2=6E2XQ7<*}1=U;Y|*SGnj7Ev;@A?H??lHNDV$4!dkX!KNE^%$Ar^E zCz9)K4P3zyuc^n5DQB>**WM=ARk4>EjqQmI4vPs``yqO+p0C;Zu_`8vX;ZPjahF60 zc?MVdEHNZCXL&OXF|mQ2qlPmYh~Q~BG4-jB6V&l!*J%XAb&#=`h4E<>FZq@^7x!3K zSD!ZTwRb{uSUfK5MrVy3J@{c0@QKtM^eAxMtPM!Mgcw&QMR-{o3(-j(Es+4vg8bgb z*BDgM%ts)Y4^4w1fRY1@y=owwMD#K}o|65$Yah6+OSF*@?5 z3@8H-QAT>FM^za_G<`lRUh0;>ZpEZg8`>fU{kEy|f6<&f%H}ki-#_G0S`zHVwp1R% z`{NAUEa^U=GOM?2dP;zgWd}uCdyeG~46Ee+p0Am`_^aO2>eGYdy#LlsfvY%) z$nf{Akq-}78@WXBUt@K*d;#9Im_vqHP{l;Fb0I~89E3a?y4#KPL<{EKHw5ad&}}T@ zURBH6AJ}%5X8hw{%$Sz4hR~z-<6-VV4aQ*NH_JS*zA_Z$@btbjD8EY~)B667mY=PJ|gcsV~z5Qbc?E zSmi^LW~0i@q*X)OO0Hon&?>#hnlr^WFL2;PQG;v!8=bxbTe{)b+9QUt;wt_d6lk9h z^2Q$yx3WD6>eqNMT9_P)?h4uQOegy9p@VR?wo~r5lU$#r9rMRKG;k~oce>r{(0Jx! 
[... base85-encoded GIT binary patch data for the new media/so101 webp images omitted ...]

diff --git a/media/so101/leader_zero.webp b/media/so101/leader_zero.webp
new file mode 100644
index 0000000000000000000000000000000000000000..218b43e46e76bd7cf63c74ec9afcadb124497b25
GIT binary patch
literal 30790
[... binary data omitted ...]

diff --git a/media/so101/so101-leader.webp b/media/so101/so101-leader.webp
new file mode 100644
index 0000000000000000000000000000000000000000..22ff3a4bc55e86ee4be18b020ee895beabe04791
GIT binary patch
literal 154650
zZ3#a)Un$#1Rzr$zY0$#EMbmgQa*Vm6Y;R1)cOh%CRe-1A*$II#Lfq_^)l>clE{Fy< z22}CuM2{MTA>|>abr^tYJ0P0%HJ=MnkthRXb$o;Z8aPJF$ zj&(S60Ks+mzOF1D&X6}NireyBWJ&xZ+5kb`6#m)BvI7Y*bvukz+GwHOzNvKqbfX)c ztab!493{7`E0TsT&^!aDSQ=!GpX-MAiSXK{=yJ2wE(^6_QW3#?k%&h@7Ia?2;D!vL z*;5%%ApsRE3#oX+kH!qTYc#iji)8k*YbJcuHJSS=XRSP8D&&*b+av4tfFgR>&|M*+ zv0;o2uC#}AmyaMI5(IBdZ}8m4@@F58_Z4Lgw4MOSjn}J+fUKZu3UeSscMQG*n0-rchNzUP zf{uDFkE^j#g3C8R&sFz79BW7}0h4x?&bbhz<>ir}QZ9Y&vknX*WM}P`zLfFnd8ZW& z;`^A>x~DA>6PT_&9$t#160DExP!roN@&9K>n6)}Xi`@X&_9hDcOl7xYlB>#=1+ZlhZDBa@oCI|^^A+W9G297*q>?Sh!m9mub@Fq+U(2zdRwJkS%M<6_SX4)M~^z-$HnS0t=bb0=m3vb zv=#DET;dA@st_1sO8(B!LZSzyq7)=1+{v`%)LxuQ7pk@N;2KIx6-1A+4e;8!p{9@v zCh@MNsDfk()F&6U>)l@|X1v*Upe|JP7L_Q;+vPJxhp5<0zH)hq2Oz2gaa z<#dWTSjV{9E>&%k(7mF9l%e7MOjI*Y#scyT8CiFJ5@9h7Ru9WUq&@h*+T-GK0!RN; z2V)}A=I7yHtX^uLbE$k1!g*2Cd2%vE8kJ(t^NN!)T17G#jnZYsj$Ao8(y%^W?TTr? z->3yF`rAbl0L9$d4IlNiJ^1ZA*m0a9t>}Hd%h`#w!P-$j*@j{TYka_yn57(Dza3rP zt)miGdVx{yxzTd*;h;cq+$$STCZ?UW6XrVUe=D(qmIK!4+DKrlyKkoOL5c&P?H4%z zgK2$h^g|z|X|{>xf7cDZ(XX3~kfHaq-89%%EvgzW3kC?Ah>s+@_qbw(MGPx>v!J&M z>S8Jlv|s^`G~X3HO+TGw-V*^~nQfoy2E(j0!+777zu2dYa-Dr#Vab8+2Eh9Eg2*mC zS5D|~05FoqQPlA55fRxHR^x?#(rss{zQik*ohc#q{!m*n?K$Uc`6zkZV;KEF~Tdt4(vNxfnc-2_mRP}|#`|D~ADcM-Om3ofH0BZiaon;Lv z2biF(fgwzCEf>A?>@{uyV*^b$Ac(EQPv##`S@WZ=GwKM6OOII=!yD2l`jyIw%;U$VQ}t+kF2ut9&$Ebo52eCuYb3UVgS*8B*ucjnfe7>em?&T zo-B1oPxODs&38FzhqobSgK580#i)8kINg)u(^>sU#X~t#(Ayv26qe3Iz-s2_-j`h* z&fl}3kK~tl>VSt9w0>4|NcFsolUuJwxO-ER#6@2YzIA;6HJu@S`ISHOMmChigZmM$ z4H_I>?${eou`&}EN?z)(+`?&9WUn4&75KifukN=Xoz~Xt?h=fmhCf{E>@qGh1x4$f zF38f;aW0P0R?A$=CHB?rlcElKEIdM!T?}125Gz{afud&&mVGWRkOdK*y$`oR*b9Gy zLz8WYrSF&4%NCBTrK!7RI8+X|(($|}HX7hQ05rs{-U`*x5+U$&L9&ZMVyr@>ihVVo z`@o7Sz`ro6JqTVr>a;r|(=gGn>F4W93>KsJ<#3(Y58}m4HjY<_WhE0tmqOOiqbJjh zx1Pt*oB<+|UOZky6Ow-bS3s!0Ub2#$YRc?>U7`S9LfA9ZvFK3=`N#!O%iUL1_5dth zql+;{1c&Sh ziBJ8v2_%enhI*b2(_Q;Y-CeMnB{`<$MnGinb`jpXH2YD~+v1$;b3zenK&l zA=a*=gK(mUc$m<*--Ry-$0-PxPM&y)8rxmPU+d|zXO=GbxwV|V{!s6{i8Q(IKzj|c z9lOQvi{M)<=B0pZH^4U^_8Kq7AWsWJ>TuJwchxbUIa*7_8U>Oz?DyZTUN*U$VZPfW z5iB3cY{GeyU56&-$QAa0O^^WKvD_Z5EVC1{tKrK;X5KuqEO2@T4eQ81-OA_gY-qI` z8*dhotd)+{*zP$oxfqpOK&yQ8%zj411QrPrRTp-6-Rt2@XwbPT5`$gb*t#U49Jk7R zDL~kvi!9siG?jmKQ|9?qnW~#%L3K1758@8k@}htpYxt=kI^w8?1JZOM_fia&0VQ@s zH)k2eLzl5#A_~+v<79by4NhbKo`D8LBEp|zdHh?;GHoIP6S8yjJ1SRkaRUYxa1%7m z0Sf$hcyV<*8&Pp_Z!rbR3Hyon*=yS>a^Oy*tmuebMdRwZLBfJ8$ zn>aBI%Ep^4kCN;1lDq$hYr)`DAk(2md~P{3f`lAiGU-3fx1`Lqviy~Z5N5!Y*`j10 zOBZffB7zPrYr6-sflzJd+u4HV#kC#+@g`~HSCOC%b@25e^Vau+C+M5g^Am}EV08~uMNE4qzhW{l9ed!uuH%Ow)_YF9%w zNX>P&>DOP7Ip~!u-N;)~pP<2{ffGSg*14|CM39>z*1mn9YzWUD2GGICl}Ol}YEJJ#5%% z7}R$>O&kVa67dpoNp8i+-g$2D0*sn+iY@1S#OCHmmga7y=SRhGJMYMZ&tF0J&-UF1 z{-NXWx@?iWDvqajev}GMUGF;_H_VU*9|?2VCJxWElLqM7#g9}PebV31l3po$O{jfn zw%{|{WZ{<`D{5imurFXGsV1oP>JrZO0U^HQ)EJCIm$S48Bx8j`ubgFRy-r{cUg(qERWv^1@P<}on6siNI&;>Um z9r@c}Hxb&jW8NHnDpuVASota3zI@~gx@IeScWa;GcTz{#u}e&~p_!ia!KYSfu`w`k zXtz^n5nGJ*mLpWq!yoXhRz1Me1kBEQPpQ{Q1nn1MP zS?fH_XaZn!4xWFi!`Wxj;I(q|pHQvY5`1rJLFl@EFy&^Ow0zI7_d zv^xx*+lEB;W5)sLf-i5yA;Ofe6xtX&4_3n;p)OVYl~?FQc?`Z#0yK83>N1!NGboh? 
zhNse4978qadboiI88TM?^iV-A`<$J;uN_*Tvknf$?xp6)cS@&sCC%=mJz3vqq1a;& z<{Do=6hPI5kSK^~ToxydrAfzpl8*?5PDiO-0mHECD9xE;;wW_nZ^8UDV-v45$J0ks zFuo$98L4lD4_Hu$uDie6#u-FMcq^5y{iXGnLV~bxZa{oVX4xhU)A;MTgRJN-)T(uI zc|@Vd0MH&4(8BxG>S3i#`#o-8IspOn=#SGF(^ycWRO8n^H3<7TWXrbB=9QnPIFMof zrWuJH==8BRQEBr>GWO_BRLVwkFM#9wK=Grf%v2pq7Kc!y%&wI`_t(rE(bc;zb_KOf z=bkBW(CSjlQXwnEh57i{Sd%OBt8kEF>qHc@p%(YJUC;u|`Jp!!`6z}{cF%&~j{Sa# zkIreLq`s98oM^ygVks9z`0oDrkhlb0zX;W8{oyA$EB+k|k>mxxIEbM+p)INTbB;Uc z>?_GHQruw*n*HaC9K~-rdffBeuA5fbiA1FO0*pTU16=| z)=*+LF9%g0!^9`Q!&>frx~V*ydWe;5z~C4mgUyWy+4vjJz~+vvfoZ;9W#@Mj4wYc$ z5Yo!X^a!Vbm|IZE%35F)w=Z+4alnpHqGLzG!)T&AKG@-+K661z#t1Ey6yt`>Hm_!5 zc@Fm)KL5nd@g=6Kv4}@S*?>Zqndmn|X`x1odYU(lEvCPajyIerahp@v)nbYS%s1xTe8S zec(jpjZ-x<|0C3%0W3b$KIIx9$@aN}ggZ~l%7x$C&UIYcKg%vG#LHQ-mA( z)iYL(H;nW0pEi(XL}odz@XxzXX<&J;S?@2cEL>0$Xwg3FLcp6YL*WI526$L)@2m72 z_wDvrAjsr1pITNVaISnEZlNpb%0UxYAES$o`dds9DnFjd$)^U;r%sgF#g6|9nb(j- z@|*ef_(#Z7o~+ToT9y|Re4T33b=|4KyxxpjTPKI3q77NQI>x8!akV-sHyT7BwVNLMXQ-2yZi2ZRJ-#TxSC zWGy=7i{c9>iqODW*I2^prt_|-J(pHrcn^V~ouCtqP_VjD&xyhD5`Ix)I&}B5j>TG} z;R3F_`_O-vml4yO=Rm;rb`Y*T<{@nr-h$lcCd!8{<8a4eIz+1hdY*YFEA}#=MKgeD z#;5!7izLS^fsmcHl>ifANx<9By>FWCERw*&-SS#fYneP9{=PSh^&q-+nSNpovdZ(b z`44K6bA^LqBGWGdHO0rHLEnQ}of}9NCfIzkETcg5r?Xj|A)l6auax9Fy0X;m(rPrA z{@y)_;yNuc*~yCG5Na_uAgvJQn_MPZhGj=uZEy3E3C`Dx0+Yvh0Ej)~j-a*VAOsHI zfTaKhGJd=&5wm{!HgD@11YS3epEMSoNDh?TeqI_;$Ns|_M-?x`F&?(!NInWsKoTfWyNTv0ii4D~@B_wG+h-7= z&lu$r_$?9sZ>uU$-N14(fu`l(RMI((?gdKt}AOb~0C z;v@-7n~cOU`u<-DJuY>&?>Z`|`2TBYILT}6?3zajmy23sgK%648X}v1=Yq1zl&jc_ zN!Ah`cAUqqDbmX{N$&>^;g1tsb$K zV%cXnf%0N-@PlIH;`|r@k@4()4W_6024%r`N8^7I&Meq&PRV2zJ0StUN0PqcNP9Sx zCNwapO^C+nWiS7R@M9FFX2Sx$NfEtJ!0^}$l(WoUy9UB2K!424DMdgj3RC5LamnLcK(67yKP(Cdy!&`xs_0c`*k-`*D!pW1K#aQqc!t?Eqv}>A7&%L>Z)e)DT zpqY~^&~9PN-|i+GkmT!E;^MHf!L$-rnU4AQH*^A=nFGFe7X|!{z|2g^7qJ?TVM2l? 
zna255;& zuO9H?+1M}Yfj%!LijXt1_TQ;7teVxX-vIqZm;-T(Y_CS?>kgva-zGA!K+_-r-XBC^7s&&KWS>V!f@UWk2;kC3 zeg~Cb5_MP^D=w`f2oh|hpDW+f(!1-R8Gq?q<`f_2Izi#J$8#jppdcOBDSTNwq{T&CX@PFgGON3gI8pCS>LfG%R@VtOB?iJ9u_(EC-17lsR}(U6r;B0b?0n zME8(d6jE2qUCsb#{BUJbcN#3J$TJJES!Pk(GCrY5Ux}!RGS6R~YBWGo0G=PNVRtmH z@@Vck?>d^=n_s)AAiTCEpf#~u_8fg+B=qOt4SL6CyC0|E`FsFLQ;Pg`V9b`eWn0pb zvPr%MyYgt(H8%r<5~n!_6!&caLPfCro^n6KkHYeTNEJ+38G4a& z&w$3(G~}cC+=)4vSCDjW>32FpFJ!L?#M~AeP7WkL49e5>h;k-qks(^DbO5cleYRZn zyhEY^WAS6NSG^JlVe+~K)tKU5NC893%bCf!kYgoUFmH=w;wZ@_I5Nud>y1$uDM<{q z89rhImI$da?M*zE^@hW{c=dOtjAv$T>_vC3537*JOlyhb4@GnA!)>$H@tgUZoWz*l2 zp+qVS5R%f#fWvRv0`Gs8Rw!o8->ZZ$X}L&+8gLgS(v9mhIjN`_jU}Sz<@qYM2in5E z%@NxCLCTZ$_)MC2;^yJuqrt%`H3++BC;Kx{AU8mq&`~!v75PN13Ldb4c;5rs)YZieu;{)FW@c!4w(l(PD z=q1zNW4PT=lXVu#Bn)FAzJpztD`9nL;#Vm6p7^Lh&wv!_{~=o5NZnu^9dNN>}(qylDJL5 zP0#~*-Kl3bf5ZIpDSP`%2FqJFycAuoi ze5h@R*aS|T!6~ZQRtVdWL3ReKTIe#?PTr8Zd??K0w6_g~@NSx&1WkaeR8J%peuol_ zPZRWP{n~9+T{H#NOpseG^ZU;w*GJyQm##1T7Bv9%OHTP4I~AVcX+m9 zTOA%sSLDca~4Lwcvay00Xx9oSEs6za2!Y;oDLlUqJ*OsqT~A(sN8T&6H-XN z(zCI`+hvmtz<2cAs3RuH;1GE!o3Ttqaapvej@#>;&*yl{M_S&*QIpscTtCj>GQ-7n@z#RP2-vVYrO} z-q`6se4M^2ZnAqydH}7;@2fdSloxFM!v8vIRDClgfTl>SG#Z?Nr=gVr=k;yC}|;`;OAQnd`!p^=ygMMs5EqpuCscIISDU|fzX$NQN+4{MAxqxY0s4Q zfvUo6l<(#+#|(Ul^>WLWY{SE2aW2(+n8>o6H$0dyoPN9n;F9ajdFzSI5>8w)-2r+k z$^L_p7Bu)R52<#1naNHk)E%h}RRNY^FUf4ph{bjpH;;E9b0=a*vQ z*%#}CGH1h1&S`av309#zY5~Dxsh|CMIr7Rzg$LNwYXhyR^WuYK=aY)sW??Pj`m^y| zPh0a*8)@9T#>A5sHAKMton5lBhY^bY2koC}{q{rE^{xN2Sa+#%0EC6=zf%sSj2Bel zSCCc1t`I!)L}Efm*HZ)!oo1pj{ZjE&)=oXQ^t0WJLGRC#U3U9C1H}LZiShr?^8Bc{ zCslDOlIQ<%a@=t(I@UIuB(CQ;sWW%&Nr> z#b^-p=0H+gBM-z;mu)JDCJq?xy?$N5|Brzhdkc>XV>@`L3GuiSf0iiK%$mPS^%|LZ(r%@ zYTed4RXpC2GXYGZd+Od$@4I|-0{zq9m3Bs1WP?jWrSsB8G1QQA0|PVyPM?DL%UG=Zj&|5RTzh5F^tjzK@7CS zd4RrLY3z2M+#NI0ko+52I@|)CQ!172DyNVT9}K*brlFS(QiEt9A9l&f>g;L@9MT%j zv4eTWNN&r5^hlx1r<0!yr;Hq;>d`;yw4}=1v$iTx#v}%Yx%xWC1@n?$rzp_d&3lNP zDy>-+h6K5 zZBoY-dKfIPfMr!dKdN+U=@gqRb@YQ1VeIFcRZ-SS5ua*0H_Bx1Ou-sAm$EJ6+~-1G z&DQstpQ4ow*DO&>7_~^ujZM)SNGzd$e*qnsXb8+ZW&=9AzZ|mY5bg&3B9q!*#^~Z# zaLx-tgLvayQ+|*5#>-i#)efknqAN;jdCilfz*g6V4xzvfm|g}j>Zr@NbV~5|Eq3;X z;(N0v{G^(+0lYw_@i=^IEZIFLU-+-^}KO|9Aklsi3 z_jM@LF`zIp0}hMtVgeM!L6G$kIo1g|+9KVe1;|}}8Ng^sPavPciV1{C6Xn)vN3dHn zh;pX@P>}M>T)#fJK85}eQP;pMlPCn%&Dvqxx{o?y8RavDHB7kcJDbyc#W$8iydCmD zP5!{rE8-Oyah*ysDO+=Q(&vnSuF-EDm4GlIX$yGfz`E)pzaqwxH<21?#s1yKeTHFV zf!~~(qYu(wsA;`hrJ}Ky7GT~9|E4x>S_P3?8hgnjck~3Q^bJ|I%?5!>AZ>1Do=BUQ zl5xumCP}zLFPx`{1)hgjL1BqpKhUmNmeFs1OLqh%cWWIZ1%a;-43K52X*<&xZg5x1THH2)xh@e1W4Gy>mx#!ee_a4)%)GU;3Kvj5iolw0&oc9 z3^PQbf>~ci=DYxK+H0G?D(VKPb$@`q)?Pb!qY{TDh!N%%JQpU&%c|A3^%xJ6UFz;F z{VEyrMx&S8LB!*xP4jiJ7A;@|$#-4nCGSCoGf)bAnMcC-Xx|hm7-MKKyJg=UbFvKW z-WX_2Vk9I89<`Fo$2G!D5-dJT9U@eHog?2y_L{yuM49nMPgoOn$&N-3%V8TyJU=@- zX#xrQ8m>F!RuHqO@!BG>C#~8N2tDqA*bI3iG4O6@kOAaIksa8tF2FgTjn{HP2=$3Y&PoUDHVZ(wMvS2% zzK?A&gqX!lEBq5y!vXAa8AdbHe6lXZ$#v&-A2+^tN}@MVG+vP<4NW~F>-WPSHs4w{ z%P`P)Y*5tkm($Tw`lu{@?K#pr@Tr8|`Y7K{3p3czQwB&$i;#Fs(Lw=3d)rmTD@g;) zBM#g+0f|6ogM*M}Hf*%HuJor!CDs(jvCfuM|Kt-%LRruAFp#WAI7a?TQ0kR7T=^Ce zj(M9?NexAD``j%Zkn&tc`es`#F2jawtZ+pu#-=J-VOO8WolbZ#3=J_zqj<`Rj`tfX|`#l+d ztj96*CZX&*#;@*t=auh8^90#)4>sXa8a0#a(+)0E`Cltvv091+EE_J$DZ;jnN#Sww?GE`e0B>}(;}5~{WD)(Y>~XBG0PCFJdAf6<4+EVYytyZmJsg54rgTs~(UuKS zlG3I1nBtusyBl(Tm@dt0$b~;-puL>dicP0fKvOUW3Ba$YFb2Lr{3F@I4`3^W!hDP0 z^(?B(d~`j9>*$-$@Jch=fSuoNq|^DLlsy8C=@DodFwb5d8tYf{nA)nbwuF?Ra3ze=-&v zF5@#g@i>iG)ZqtsC%SUKg2F&>-h~R-Fp+Xa=T?s~S~DobwuY*Z^Ejz1RyTBVIG2~C zAFb_to``#!RxzJeF}z*jZ$)FF8o>B15(}B}(Y-QsF2c^P&oykNioXJaUx@gekscwk zOJzUaP>_A9tbA06EAaHvU91^5WSO5MIGcN1_3T;SK9b+Cwm-e1$Ya$T@W_PYy 
z{#^U}o){n36=EbKVPawwktR)Gs$WCwxH{PrO0Du(TuuX{lbdj{@^>Spqp&qmN>JKo zJo!l|(kg+D+qgk;H%L5_{Kp;DQr{!Zck{VP8+}+DDRMsVw$FUORw>pKEwtRsaZ9rcK}uSrFG+(XWFJK<+1lQ?WE zzXoFEz3?N}{AREks{LHFdqA;|Y@vaEN81v<5g;0GB1+HxLNI;#x`Qs9b;D_%vt@et zF+J2TBg=RBm*NI7Z{j&e`L<`Zj!SdI9?&0aQqtZ-e#{OAb%wd=tg1PkE@#$`vkXe$Gh?UQ&(O`GIlDNCbF&!dJOS^B>ex8-_5vebcKtmQ(eEsFz9yG z_SK;cPZ9m2dsCE@^Ju0G{SL|Vj8P!JcAqYYXhMp;o9PT@LmD2%sMIbWvO**T%zK9E zr@g2p#9jzaeUutj~=-eLh@=5BB^N0#Dse{rNii%xo+-#tKL9iA4?J1u^D z;x3x7ISKoq^>Oev)rcRev$T=5*;S;R0~WT_ENy$*8(jE;o+Ox7Dh(l<)#t}ZkrzrZ z@yZeDW>`UpG}`EIb%}Y{n~ZL8uMvo6n}h0b0ul2HWDO(Y5m38hPk5q7ong=TYOTU) zx2S@*Q{xGf%W4tpE2XC9b6_>0joJQ*o=&eI$x{!YhWXZjZMpPt5+h=~-|wC2P|Jq{ zGBcPB8B6}i5M4ZTw!1~1yqKM(6H=;c+%TeTQ&sM7dV6qjbz;SzeRjm4 zdJxy)p(rZ_VY%{Jq`E}_y{dD^kbjNI?P8h!XYJAsPaydGv-j(^AP}tVr#eXVp#zoh$ib|i$*V^N&2_1l< zYh&Oz&CQjFV3`>IlO;D@tsdU{z#@Rf>nH#*5N7{N$%~NsDsA^)hEq#x1 z@>hngv-H+JJQr|bXKHVV-E(asj2qd;D9toasO`m$H18@53!|52JAN7QmN)RxrCeM> zQ!awtJZk30c8lH0Nt`dhE7$MK{NzbkD;h=cuj0%jeL9Ns7oonaP8@lXNe{6iO^RDW~i(~>Qv=cJGUrG!tJL4?YVJWG3p;u(N7(AIRi<2 z2dt0$n0v{{blICR0bV?t<&%KoBA{Ir@e(sw*x~-g{=FsL* zO{R0WrK#PcLLab@7;Ui)u^8sUrsGzb3*D^%g}VJiteFo=Q~iibgMAUj>nln*c&oG zUh2^^y{|DZkH2m49uga#U%QB}Mr-0gc`;+v#8uW+EZvfnJr%{!MbrIg$QmA0tNUPk zUTK<>dj{JXlVE2rv-+3t14Zpet45swz7YX!2!oc5r1$#f^S6PxyFkSTp>(gFrMJ?V z&W0IyR2RHrm9ccwGoNFrS1^Zo4n_dGn7&1}>$cJXj_s%uiU;$Flg2|Phqgq{&HbmR zO=;d>U24=He+fz83`5tf12O`|@VLsl4mVF4Zn+BF>;moE!dD;sA)YIpiGJ#4BH0Zi z0Ks*~P$Ip4(z7wO)0FYasPkXG7>=yF12%(@E9!t4Rn;0^5efJ{(4kb`i+ujo?Kk(a z=I7VZu>pT&_tnY6y*1cq=TxhdM5csJwudMaHoZ)X;YYl6a~fs&ImiOgpZ3TZCE67! z2MuaBuXADwRo181#w&rytDw4A;mKyq1#S1Bdbp@=!^E1wihJKHf$WP-ZQFWCgcJQ7 z9+6Enw+u)-^uH8NyzeT6S28v=edG{Vkv86(sZX=@PL3zMJKW+7v2_201K#$sJ5k$U z(pEeL^D)5trx6u%ky~5=ElnWoM&fMUXKoXaU}Y78H-mV?SP?&RV}{x`a+zPGa~nb| zbw=b5lyxwkSVG=Q4jHe40TQ9vT~llNR@zz(K~8}^Ur4kj=PJJwS5{s?)3RQT2pFGO8R#fS{K z28dVnM8bH_yUBzC8`_c(eJszmqVP~_vCImP`3oO+p>%SOxo+Tg>pNl;1)7y=TvnlK zAcEj@Vj8w-G<85-C|{SE{4FvBS0F`qqVyUG&NN82c(ZrYzqS?K>HLahi(ep~!y0zx z*>Sf#T@>3lmckCGC%`9_-#~qM73-h+~;du*$-B(8u(H^=_eUIQ_$Hyf6} zo8gi&&Yu2Z!ntO9YU;j8l~1#aWfj^8Afqx$h#=TFwDYki zf4)pf0QN~VIEFa8jvD~;5PmE`(Fjs61-;|cg^Fde5J!zg?lW=va6t$7+TW3WE~1EzzV7w9T6j80Rpcv7a-&D$?3MMz} zDVK#A6#&;xsq^qMQ`v@Z5giuFW1BSF+#zS{Ly75@A!J}$yJ>%E?gCG`G*~11xNGjj zW>Oz^ll*fDY6cMM8tG_nBiLw>rtm5FKZmGQc;i{SSxSeei*#A`Qz776^L zDhtza`_HS&6s9n>vj)Jv*|ZQHgKWI`Ym}*c&^j{B;(6D<_~rJ#3=)EQyD#vboTgY# zLaRByZzhu2@0B}l&y?`RW6?g(@UXd$D(`ywq!Dox0dy4yeMP5s7&z+~wo;=$2!59$ zE5fkn@`Y6~nd33Jv+jNiU<-+qtcX9RPe^yN-fZq0De1;+A_rOz{FVdci4o^4PMDw} z8Jvf|sqli*b@*unu5GNT?>0-OwQ8FkPNtE>(jkEc?L3M)@QU@v z?qR;Txk&PzGw-*P2T?h_4@}kQKCdaHdPU2@u-^aJV+oNcdVNFm`0CRptomOB76w1V z*GyCDmQZZCJ}gi67zd000(~|4=AjDHHB}upo8#zLt7b2Z8hsx$J}V83999L(0If(r zg2cF(zE0u29H2#LUJjU$H}#r04#Yf2v}3ynlzg_p?x;0a)q|?X#Tdlcf<*Kv12rN`LdVE2$XRHSlf@l4J1yf-fYtZ3xW*-ih^O5@>-O? zj}qSe<}Sg}Iw*fBG?~#H#8csQMUm`bM26M@-d7i$pS?3|6*QE96F}NAGGV8ZMXZgC zY64L+9I){vSSwihr-L-236NE*@LM*>(S`VrQXjK1zB7I{t=Os(I+?F%KAaB^!q8>? 
zrEcOS35sl|oEyVF?%c6V!?SaOmq1MLUjc%yJHPBy$(AeOJX!fWZYuZvA!Wd{4tg!u zGn3UsOEZ=v`aR_)KvC3x)T4RU1f7M(CKyO$4d_e^diDq*;RP`VuS+C8wu`RmE_Kvr zHlxjjIZTZ|Vp|*yD<{`sZ3^nK40l>bKYbw5J@S4e%#UUOi1mwgY0{V8|R!HUQc{% zm}+F?6`0894|Pc^OJiH>ec5p=L*9a4I|fsOCiH+;eh5jeZD+Ar5D~(eo6ZJsnPryGh<=uQ)N_rj+ig!|~r~FiKHnu5L)`#6w z|KUVrYmz=x$7{R6$ALf8BmCsw_Sd51Lwu!B!|vjL6kzXfV`_d1++!6&)f`qzI_%xLW!jdclu%luk(;&%&h?d%TzV zm&U~Jc}&o1TgF;z@O#{IZY1dOzf{g2R{k<U)*mIkd;}w?9;SLLgNp{(M{dk2rizkg$BU=A&X`^riUxpwq`>U~nsh88GfhNXOSzjSE_J(;!5P%%Z}chh;WlX<$}^V2Z0kT0C@S0~+Q8)r!PVzqzr0o5c< zSYb3Sq*FGZfrxdYcn13a6Hb{B=Zi-e!3NT`3mkL>p?J@JtLkc?6s#+6WepY$Bo6zr zl|)BeGEV&07r($n6l#~$f?y4wTqm<<^qe-Z?seKXbfg)^u zwdYnovP9BqEo(!2qU*N6v#w)6bbM&ovu!|*@Q$wk8AyzWE_ZB%jtgF2*VNg;;L@4% zdUMLk90elQ#uO$>*t1qv2#a0h*^{XX`*<<8H;7A-paqb_{Bo|CbwJmUW}&}he>$;1 z&EFPa7o%rMnGMjdL}79u*CfM6SgVW(eMS*(C+^e|sSvEU<%_+bD#%`~hJ(yVt}&rO5CkgFm}4d%6ew2lI^1`Jr=`OQ|lfv&Na} zJ&14e$2L%-!jCWG%gJ({Cxom*pfhywR-M>a$OA9dz!_2KLU7+(Dv*W>S|$h{=)oTESk<6~FH2{ht}`dY5I)`rcrQ&$3hbK%t#YeV-2RdBB)s6gyw4fcLwlAFDOE0@I?2Q$vlfv_?vKGOu3uryDnf`zOjTKC z^B_-GeLrJDonA#9b>U?JSEA3`cK7+GCOsN9xG59;R8;J#7R25a(9qa?R;t7}WY-%n z&{}145%Zx(@e>U3k!P~$#X6)Wqr6H#(ZQM(K(2hVM~8z+rnsuEzKxx|2)Ag zf1PB4T`EB)(ikF2mCdKpB#tH!(cZX0hlk}WCL27Z@Yxy(+wzz}iHODanEW6n$f}K# z#*!t+kpRaJ#Y;|waP+v?c4t8~F&y8cyy!;2glG9janEV;klB~*`E9G@>R4fZX-TUb-;QQLZR;$^0@OP}33bfmK0?ch| z7oX@9+Kt@B)#-SB9-xJmJz{adRkvyoqgEMt-6$5LQA;7^a$`XCcD)!=MT~O@f>+S06t3ZVwPLp*%GoMd#vX{XDp&@bTmB0{6LR6jhcmu#FsmqAJS#6Le#%7C+O3KAu3~c+?u+vgw8=hrA1Aq4*A?M9hSjC z2rj$o%i(U^NdM8nnC~2D@OGpSX1H1jhbqOlV$dW?syd7=1aV~C!;2697yb2D|`pPTgn%IFMJ_$2U$Ale0i|eWN22w%2>|~!^D(> zgSb?<;z}G4$gr~^lBhAgZXM_JQwzLZ*QaGLVht^%A>BZU6N0}TQ6+=&{O|Ol)5S{B zcXNBBF5~*LEYhIEq$(blRd^`u)8g%A>a-k`grijyXJ-ca2^qX7SL;08iM_JmQ~hh60GDgU=Y~_DHjrvwy$WJjsBNml2$OfX>~g6 z?mq8zwf1c&_+qe2>sc_@J2U(K;BoIGKERMP=$DMe9hAFvAj%>qJKimkya zE*c@1l|2&V3gXcV4y1uLT##HXl;oBuejY)&_V^tfNKygbp$5U4%&0QtS2zzg2?fX~ z{{k!$?)vzVnQi!y65`&nVe@?Zjk)svzgb;w=TrX>IJkT~$_X9%GD%PE5t^M9nA_Hm zeif`Y;_24|ec2Q(YT00#bgI$5AjWw02D|N52&0bvY~SS99QJP`5yEGCFb*f$md@fH zyZyZxRb{-qMV)v;6fJ{)h*^LR5J7&g5ShQeO%YpDDUT5 z;`mSq>kuAwslA&yx z)TiV_=zv(%)(t)=M;0(`TQ^~RF;KQOfY0ihmb9@z7&cpb;o>nhU%eR`+Yl`FQ=Wjd zY3J=TxEz=-2dPS}JzeH$#QaO>`Azh(PjhAbjTu06k3mw_(FR%UB3*UnS|hmQkOxQa zV~Udv?%154`I^$y^kS59XzojX;jun^JSoF3aS8akqm{2nHa`u6#$JL58;e2}6<3@#0KFHJ`~#X6Kl;3|am%ujV)BAc-Ny4ZJ|? zE|HY-;%o?+Q)bS4(wZKu?WxW*Bj=KPsS%IH42;{lW9|3O8l?$5Z#rWx51C70d&}8~ z8#k(YD4dZzUC7IW(&wiM(& z%v|R6ia>XAXNv-FH_unGe2LpQMdWW}vpZ~nb+0spLPj+OYKH!LKEGshv+lzQ=d<;& z7QaWQfSpvoL<{r-LAj+JY2}MMkq{h`BN< z<+QXt2aKg25+r*C&icUoJxg<2hgZP z02iT{qk?f1H&lXRi#L1=m|bf05Dj-)d#j1$`2)dF(SuKG?7g20ML5hf{OWH_aj>h2)(En9f-1j*pFtta8(*Bg!x9LjQ-Tz7O@{&6XZEA{JSj*)}^lR=gN|4s%xeqPHD%zFrn z!W|fy-}7Iy*ONFtxxDdMa1(cK4md;vLrYFx(xtMd_59Z`*0!D97WZ&~s8*8d8E&V> z8*ZIex^lZyx3~qPCQGj@3G!Zw9_glGmu#W?@)Yz_?(Y@!gMe3>CJW>p*4Wq783?!DtqAZ>NFVC6)_^ra_SUv{!eseFwu9BQ%)fE)iv2c$p4ZVx8Pn>8N51m^esMc zh#jHdmt|A{T0o`0P~_1=;q+UeDp8O>rAcHjubGr8ptM3nyucwEL2s*&-r?Ix{b_e# zx=C^HZID;Lwz!_2;Mz1jvrr2CQr-)+Hi6QVFj7aEF@!PR$@#(%0 zpVE4nTRt`_g7Dp7_Jw73H!@vrl;Nh!Wqx_wUD`Lh-bWY7Y6W?TCAjtfZOmk0wGRO*z#B6*#B4!V05t6U#6Wblg7e+X_eH z2!6e~HT#gi4~=jpVA%M5MV`H!;>ltrIM4IB5N?){cf1O%=yrQYPEI>lCq9QM*z=u9 ztR)PLH3jdLyKllXWq)%=a!Qi1TtUuTg%HL}QY8@4?G=ULZtyv^%t9Z7b}-b-9r(if z>_XtG8UhcYnk;y6E{}e^!J??oRTk_)Gq*q8RGfn>?z{jdW|Hv78$Py1H$Mtq@ixkP zK1i?VcEaI}u`*O&<3RMN3HTuIrws&atxA|kVN1!$keWN)4p1w;#Vc3#V&^8-j3HK% zd31|VFB;U<-^2c*U_vpvQ)ckyt@d6KJpRC|9qV4}*tU#}t+IzNGCA(6&n^ms2V_JE z7KKg#8d~Xsf{7lqItK_LOD7x`yVP!l#Z9v!+^B>&hFBnm^JG8#F&s-FsA61%9Y>T? 
z@G@L|5X(717(eb==m7UmA62{uW-edu5tLbnsP7c&l@4 zoX~g>+r`IXP)keEZxvyVX`DE(ot;CvgU-S<%r&aQTu#oXNYfPXMn@JPKS`_Mkk9Qh z+vgM`P*I0oq&zb{()Mg| zKa$@YUYJ@9^RQYyr|>n5z9fpi5-2TWBnx=Bs=LX%>LF3WXf`$4k{J+wx;Q8k7vR0y zcpYW4j`YN#h&K)!$WdK*)ABbJ-=OtSA^e(>M_Kil3}G$7{A!pmMel{O#w_m?g403~ zp{Y-Voh~J2491*eB6BN;_Y~>Hx=ivxSNK1o?sMVZIclPRf^5JKxObe)EmMT{{C*E5Xcc>4zae6o>V`Y8q7IW*c!Pi$3?%4Ut36dBS*dv~w5efNq6eBkj|* z(qo)VsFBlE(|edyGi=Ofr&S2`-S{p;p1TEozE^sSa#%cpW7Jta8M**RkKuLE=Igt zWXk%PAAr~3+s6nx^qD*k$uGKl<)D!okfr@IOZ+!55wVb}akI(PuLahN*PMhg33^8<4+pUh?mSO*3R23InfijBZwTY%NHE`$rxwrIU3?@xCr<_tkKuXS1L}|6 zU$tK#t{M#>+qQ!p)>yaD(#SxklcLn6)@u^r+`ju1hCvFE4sdS#5a>j9-7;PE8243u{~Y zKwZSsC42i-q7pX!a-3!~@712Cbi*ObU7802I;dm5)}Dcy^n=hiWyCbaz+0a(L&dHM zm+c;&v!Q9`X~a2>K=w~c0)BJn4ys-h>z-_aML7dZ7}*gIKY%xxuZnocV>Wz%XYwb* z%FvT}4D$g`hNo@Luo02+h9qH#OjYMZ+^Q`|oRTu1%@$_?#65(N5veT{&33QbH3E51 z7=3++7ii=v0$CP9w-QH){1Z4*cyTfyCteqP^h|iyk4)pEUw*fx#pm$-*y)aptYd4F zZPKzH>Z>Yp&?@JcC#sDu3Y6=LoJ|_NT&L?Z&IAp9E0Fxuso{gDB0+y{UJ2prZy6>a z*-M^KZ{|jc+{JoSudp*?3VA6#{VO&5OZ<$q_Qdt-iqVc=mWTKT=#spMb0exA9>VATp`P;MIjhZPSNuA+W4U7wTVgQFB*_P;f~v*A)Z!pqRnPczFhedUlcLlg7u?r_ z!dX&=8>;R-WLEk}buT*N7~J{IZ476RY&l;QSb=j6O2$mq?M2>m#h9!<{1eN50LxLaZnt3zu|XKmv*4+}s-%p)q#F}qDK_V~j+5wq>aj~F)sx@r zoZWu<^J!8Lr7yItb0ksiyd6HnQVl1qR^);d@XPXScOMedbu{(8pXFrB%M^BNTwCo} z8FU^AV`Md07`PMafU1HmI3J85=AoTv{<^)z?Ud^{Q&lo=Vc%l1=KPP+L?%la8exx> zjUJQQD>Wp^QQr^xd>%-gnKlw$?pq%AI>U}ALZ=}*O5}DbnR_R?j0BScMZ!eE z^TRFTnhYQlEC-6bf#>j{>)eOTPuT~a`h%m6x{Ez7d?Ts7#WdMg35it=S&>_XmL=fa6Q9VAP=R@7cXy5MRZ@Cz z_Mz_~ZheZ9b<_eULY=v#XAu^ZeeesPq*@64cVfeoH##LkYIlzY1C7;MO8X+NK}kvl z5p1nSXHz$8B8@hF$|H)m5~BSODwzRO@qbz(9~3EL?~hss3REq9ROk9uv)Y<&7dX4jov<1I$cUFl; z?vN;mU>FcB97l^TMZo%{H3PIv7wMUs@!3)|x^{53-d_~-J|*XkUcF|^G5t#>nbdeUpt)bo{wR7i(1JvY-8JO(V#iO~L^EdH5+o+@To z^O<#0WZX&JIr9(?7x~s2>IKko?XmoznCy<37|+eTm&h z7o{ED&gQ=g9`MywnjkRT1E(+`Nvgl`iVCJ9yWoAqJ2ro)OHC1hvSbxL|g9 zc(lUy1>e4}l@TidPOs;Gtzgds3cpDTB~j`gYe999gf2F}_9V*C7u-xoP_sZx%HLav zqu2*Lcn8kX6~?)_kd|F~a(KZPa>H><_4+0Z-;h%-vnPQ>!38OB!!`R3aTNP;9; zQe5?`)vH{4ZF;J)Y>|kRb`(;<7o)rv;0!HtJvvs|B>IW@E^McC4uwxc&ZA@SFf$&E z1#vvK3A3o1JO{~MYz9!)U_*Ip6+6fZ<9*tS~V9XOaJUxBA=nTOVpq=5O<) zk9o^3B8Vi%ZGa_zir19!LcU>ojvkRzA}P8#x@NFsq_A*8GUa|b{`!%M=nD;TRHBN% z2VKa*mWl>(>u|QHN|GAy%@Qwy=?nMFS6qJAAVCT1@lJR0@z18b@d}cdsrT&FLLvY2 za42>`0Y!E1=HZryeCGCT!*2w!V82iMWf9e2rhgnkL;ynUjri~1trpb?R{?f zq$1O!-lY{$0rvTy=XD3R3H@Jp--$&PHun5Wviw3erBI=={CZvcll95(WuAx#j#`;c z1*kpfjP<4Wht!xhI^MCFJmh@;6!7bv!ZKmRPfI%_403=*(_-;_NmFBjD19)D)ZFa> zn{pRmhELLt5T4c-ezmQ2)5Ml`zNnZ?Zgj(7&RTxPm#vkbT}-%V#K4>PNIJLIy&BpV$-hJSSgbL4*aP6%C{c4?L`!~ z-Hr8UCJr|{OpjG57V2^*t0BB>etu;RObqGrbtRwmUa=67l5~<~419?^HkAQf9tPIe zn*=AFu1R{Qa85ArcvGwRTk>|_$%@!gl%F>L%c*{sDa~^RWLRH&U?ANCeGR>=$OlzO zL5*C;@uHXr1L~rd4sDOEBsgRe%e}m_0cN#S^dJ4TJiDx4fDOvtqjX1JA7i2ack$gN zDvPF0O_-r=sFN;aFuv6fl_bP=_<-^HZ^eT-^{I_JlivulBBKc+6q_VoCHS)nIvd5g zNBoeMZ{+L6qKbQhuCnTJc^gemyvkXJYe5=phhZtIvhk&&=pJp4YB<1K8y1U_ z(@=%B5OElf`!gjqgrU^hc0f>-+n$V9)(EpGGi1L6Fg{NvEY1BBcq~#|T5?tM)6SK$9!B z;a&!5Z=`Evh5kKp@eWy`g~CmTm8v0~0S9$1{eNwL_dVmOu91jdK6yOGS@2mJa;)+K zG$!RNC8)3Y&=|YQ6r@}6Bij<*Bl)TlGriu5I`-lrDv5a93=?RlCcFObz;n;@;Rj_u zu*PPQ*kk#!0K$v*Nf@C$=QW>u0%k2C_(0{oi7|Kp8Va<+fPY5DH22f)R2T z)UFM{b%X&HQYu}h)1_eMYpfqY*om`aSSp|4Y_0Vfr1Bojam1XrW>PXqVlPKH0x|hN z=+?H*+zGIUTMrAM@DqnNHbv}u8U_?QJk3Ez>S|U^vO1qS#&GjeHWJ2U#V~g*gQD<~ z)WxL2p4`@S1zW{Oqo}naHfV_x3x4dHGUm&^{HrI5c9R%I_oDn%Fd39SK^91Np|^x9 z5Hws!ap^zIm=eelN;OC0$>X0ji&4@U1&9>X=4swILaUW_h=Gz?@y&8gTFnJ zezv6VyEa!0B+9+1?p7GdKtjSeoFkfu&Z4TFOG!<)v!6wkquiEc8Q z*(Q@-cSP89l6qhS^^_I`3?Bt*c>XS8ns>JKE#$O;(#0a4Rlr~tYqGDIOl${0YF@vb 
z*|o#x2_5(9x7vwQ0)H{sQ5!jXp&}zBG;#d{a%7?aqk)mVPK_mJV+m$2WJyOZGJgj` z8cOcI$d-^C`h6N!EtW*7GhZ&U51a?YmbD_|d*CP{>B%p8W3=AZeyC?xZcrNJ5ZuBx zM=sR=n5)C9=?LlNYx?v)OZmvGCUQ50<2h^J$W#}6MCVm&ys&%Z-4+bHk^c2xOy-yp zOf|!mVMmnVTOAM`{~XdNmdL#_YhjE(K(U4*T@Ei524;Az-;&jDnqy<&uyD*4e6#F`|IjI{{4CRQ+p2^8IuQfDyw>6sDmW3R_+QCA2T(cs|z*;g@RR$Oh^ zmzX2k$gH}3&*ikeffzZStGmOz8DoENpRa~nP_tFZDGI%6_C`I|b!9MS83Y=t8;qp2 z&JL78A}+lk{$SR^-uD?qafkp5b;FcY#43BAB_0oLSVe64K2_y;OMrT;uft5h^jMIU z(!G~gVnPm{Sku{hcV+e1^MEjnbhg^aO;=W*qVu>$vsqtIQ&W;4y%`7Y2Wk|wQ#)H; zXd|TnAxpz}3K`cFE0q(}BBCSq!zxR~p#~0Z<^z0;3b|>FO9Du*>J~%<_1u2RXLDSA z={SlPA51Eq7c<)BA6tL>LG#$`68ntGo#LfEE{91QeD6%u4|k(I6P3=nY7?%+da;xG z&@E_`@4r5Az#G#Qa1_xyU~Ke^1qkmY0p(o^F+D_iFuO^_EQ*)xr-bnu-LJ!ei|dvT zV0X@ocS&6to!}~JfHid`4WVJ)aIHz-f_D;g+br4I-j53ivl_kYUE$-t&qw?25B`0* zRu@>s-gQ$&Ro(t|Eq}k9<$1ksGuVNQ&O-A~YtSUL!=>sKW(_rC2e9_;3-j%=LUwQ2 zUnSs62&>aMe$GLc0@1i)eY}OYCzH_N^@$N>)wBwkM)`87+kbu73;&SpD1vLEECE=< zl$o(**7sBLvlvhxtYAD-bTJ$!3uy!?&}iA%sS;IinP))PlWpMK`|I_%)^g$h;oUx^ zWhxbU(0q$;esG1vQwsDmWCqv^WrJa3j2yhM*S#c07Fz!^ykbOMmccDBI8stUAb_}s zHFSUkDaF~fI?aLx{69@vI7sYt3wexRmI+{yQn!e()vvz&&Pj(vM+8`no2~#lrLN=drV=Mu+`vzhBng7ESspQbTT3dM$ zq_Lcu-Gk(6Il+7T^n+tF?#ZlupM_ zV8#Ai6xe*`*djg>_YHFq`~?kHSn?k;}$TQ$g>E zy~P&ZEnIk=MpEHC(zsP(IQQu90S@b@xWSk(n+P>dHk+)sFs;#wu|=)u5SYU$`tr{# zY@L>mV-JYkB&ZnI6qjnk-tL7~gKAKD)c92PJmf{UY!pgN1j>;r$?D%oZ1xY)7s3#| zH^D@skI_KJNtb%Nr7`p}<*S-u8LxPjnJq@ArFS)aZSrir(6w^A*Fux)jlPx)(I#=H zcj8RSVM$is{~-aa&BFZ!zSw7^QOy5(Vb&l<>iO5N!WkBCHx7WiWc@&-(XOs+ z9Z@~kYm_Cvd~V5RJ(m>7$jUi+Lke_OWbKv&B)h-LfcSk<#%vD$%>Yrq`*)pCJczKz zf~Cizz#3Eh|Eq|0xh%eQB%;@{Y!BTc2Pr&x>~nBB$oP#&UqJ<;@60V}m^@Sh=fz%u zTPV-@fh;}!oMjJDw-B!?3fq(d8N_fa)Nd;BS0l#p0_$3LUVlwzBJV&*Fd;cZ9?8Mj zM(3^?{XG}CLg0_^Ws^F|mse^lk6AS)wnRzV_V&PsIo`UDz~1c_?HiZu&?pkdy)ov0 zVyZHKT86v6n&4}w-qn940m?^wrJ_AKKuD8H@h7@sUUMH6`m8yOxu1*9&tV8R1F$86 z+2k(nGiA`2ig9ng{d%rBqAHHx`+F)yi(7T!S2niap9sHCy7grQEi)CCPA+uOzU%T7 zN;#BG=_dsQ1O`mNBo&Um?EiG;!7Ni#)_j9VPPuNnT5%Acw8K@1bZoVQTUa3?>zC2HYEcDp&w1hSBD)9x5QY; zt)H6_2&x8N_K{yoUkVvv`@)!3Z_)vx;O%OdM-JW^y+}-C3q7#`XX`*Ej-$O~#-*O5 zj$Y}tGGO8u2)}DkcEse(nAFS6$dPg5^FH7BezB-A$uOm6~o<{?9Tp1J*AsW+$9hjuMj=eIimDHHtRt2>(JHPpN`VPqz z4Bh35sJ0e4y6ajL*`qu~&s%wt=!it_#Fzz(w;GaIaU{Q$4vxZZEU6m>B~lzFZY)T7 zmh*t4%%9F{#{KbTJR#--b|H%nBE@0qRy`-um^K$1Ba0}|9OC4-RT(QsDrzSHd!lvg z4G+qwS2sVARLkMOEz;FOWEfc(Vs^oo`}^c0K9DzEcK|1)Vdv4=wx9P*Im!k*)QV;o zOkc8w`NQwIvVFu!7>9=}<#hfSD-d5DBlvpiA~K$mzu3T4CT(*<2g$y#8pQYXwn+Nf z7+9%ec-Nw6MmUaYzOq19`cO8WF*$TL$LXwIJr!}mqs6WK2on2iFyVX%`V{xfD@2tgPI&0J8oUH=Ohu{Dr)a zaACiJl4n6wf0mC;E|>0Bm5%=Qa^Q@hr5HDbSVh8gohoPnAN8@*%hJD(>e;TVT0mX_7a z&JOIn(<+K=oqZz_#X1B#K+%8)1Qe-m`<*KgdHLEngp{TNi|y#ao&>=Ek}nv)+Gsh| zGLKY?;MAfkk5)`lC9uXwJJMqgfc8mumN8FW0`ZEyIWOUS5;GR$!PX{bfU@HKJfC|q z(*hU(8zG~!Os;mpbIjSmXwZ)|g%}S5j!hp|6hmEH4YRa~LgU-n_o1`6O)*kCqNUIU z)Pb7)cTt__j{W-G$Jrs8f4@ZG|l6zvth%Zd;vM%w;xd1 zi}k0dO>a5=$By^0*yNCN&oW;C7sRDl#E}4H(6)D5RwRGHBJMrb>Iu7zFn7p8i!bB0 zV9ICO(WyYMR&ISw?+L4bCt$gp6BK6as@=?f#E9j-`#Uxks{C3$uB2}x!* z8Law93qtX6F4gcrT@XPA2e7VPgF+__2 zZNUyVvkMS1XvXStJ$WVZPv({jrNnRIpFn&e0|i8{IHu5RW=+c5>;*mtA&QQ7?H7N# z(A@#==*&|6v?g93E-F& z!k|3w#D~4+rDBPPc5Q#QzurRF|VUcWxEKeClUH-Kh414dWf~Te;tH&`+UF-(9t0nm9SCt8vdz zSK})%x|_MzEB}gB+}Qvm`=g4WS%a88*<`6=2^7E!Vo=3Qltk$aW>reE2P6F+uK;!N zlPJShUI+dAJk7_BFs`!-J=4P|(krct@pbZW`7!q8_nvz^&^19Ym8E~suMdzN1S5)h zZ>+~o6P#xUDP|qyoe6TaPm7{sfA4v=tft9a)sn@T)Y^`>(--!r^KgWDHvjkBhLcfj z|7uxcjcQeqXy|2B6FjN|dRbS7Z(EU0oP7lu7esCc4u>uE^&oNkN73i6%Us0#%G3S~ zrg1c+>$)HBD>>Cr6Ypmzh7r5>@geW=X1F%JZ%B)O*{_{{i)|PG4}MN=@j_2L!|a_R zJFW-hh=DHTLyaEKBIhD_y~?XrU+ug;x>t?PqopN+)tW?}*)R4||GMiJ>Jb8;HKSH% 
z%p+is$IJ<-D^XT&Vl4Iy1`?L*8Gr8?n#9!C7}aj*?F=Et~P^FK`J)`%jn`DZ;>xPwAD2J%Zkr?Z9kXD2Jef6~c~%b*RDS_R?Og7QBU7 zL|sj4%HDHMC;U*(8_9THXER(^YJLK~&^@Qw{!M}SI~aFt%e+O)+Zl-v)9I-2VJQiw zr)qs4tTt(6-Y7i3%ezlGWyVM11ZYTfr$ww~FTJBtNSn*IK8LG0gR1dV6L)cCBYH_ie*eZ#_;IBJ}b*R3qbMqrvsnE>u3K ziADs}83lDD)640-s^Q+yR&g9Z@rUIe*&zlDzLRp`s6vxTb|kvdeYe1}rb5bW{QjM8 zyT<(pBm3!bKo-2KpmS+C{K$962L_yOc)-+bJqs?=v=BQCS^YOqlrtUW;MJeu%t#(i z7$apzDw1oI<{FiTz3zmjxO!*Ph&BOQ(Qt>Pa>{AY|o{S*?0d zek(eu;&2P83l>-}%qD$$-hW)-(of*-DL0Qq6)vR9k5P!cXZ1kK+-C`ku(+Eo3!9m8 zc;tN62s{qyLkAJI!r+5+kWZZkJDBwn z-Xp_*9CY(9N8Re3(WnT3aBKCdn%S~id4&Q#{KEywc&9;Ez*tCMnAsM-O<)6|xU0c* z;s?kMD(ayz&FfF!>&A(C&!8zA`dc$*G6B_7+F52DjTj=5l9Bqd>v8Hvw;t&oDmPZpCRbQ~C%veS5dX z&6v43ZrQMJlxe~TzM%w)uaYFyx%68V1b6R@HT$p}pAi#mMniX>Uf&R%-B^FwU@;Xv zI-MiP0lyJtA^eD-#?1`1`nlGy&o^gl$x%3#6T}qa2Z`BHf4+mv^H3iSaN*(7DY9=o z*-Jv$^8%)ERI*&;Z@sOT�Pqi`b`8j4h8K_KRI8s;uZ;YW+hbFY%mA0rW96@$#ZX z@8I&3d*)}IdF~1gSe|V9HLcg^@ebZ53l@OF8wBJRo=qLRz6>lzZt?DbU&h!L71OAP;B4Lm zP2wgiGd7JIB+RXajP;3Q=HTd_p=uB1b=rP_s$SFZ3+F8Wi(``^Oz*c7F z8|^Z2pZbsW$2D?WuB7JqAa6X@iHIm6G{7=wz62bN9KN%fB|Ni4*>bcamc9&r@*QAq^9v|;72Uimt;*S7NMlrgeaT)UUm}Y;K zSCAh^?E=Sg1VXdUS)9AyhEVS^@4vnlAfXM(_0|EN$Xpau{{x?5~fE( zxY=wxu7@hfu)^~x1*&seWVavVMw3z*AE~UInMcC7pQ6ywZ@0bGKAP7oJ_#~6-``LR zMHQ(H^_Wqrd;ec|jU^bOma=%0i%AUR6|OOLitLW_wm7L~zYxK~6q1}rJ5vp#E^2atBwn}R(2M#K|L@8@@mPwVhZgMbTQbgx2FuZ#Eb?)%ft2=J={=y;#Pd?5e5s9BNRw&>cJl4)cI;cGC9Rez-hYXw#&h?yFB2^8Wv< z8+~M~&SKRdyUFa@YIN7%ITVJVF$i!M3srNlwBw!R$Az$_j3u09mb+44^7OXA!4uyP^-JI@Vj6PH*OY5 zVDvx0Lle1azodfS8^N7=;)w|F4@ZHcH+Hs3idtRrZ%|)O>ai%8b?H4Fo!MV*oUM67Dk2YZpi6KwxG`Q|vY@A9gcI6x` z#w7fd$vO(+=DXis^|qmoLawy~N)}f^U*J*@^PrDFeiawsE`3+o@>gD-8g!)Ci9WWi z|2)($i`@YkR;AntbD^XkZ;Ply3A7qWj*ft>m-N zy;-~6wQ>xSeC64u7s`5Y(z__MBHe=id@(mt#-*vXId@4XA%dj{b?de8Se}T<$DMRP z?{;<}q#SMT(pjX@VGQ4OcM~{mnX`$VV+C+19suF%;LlId<Dnx-7c6?q|pm(7$%C)3~s;TaldnXmu2+{cUKX! 
zYO^drsQgzkni&ACH2`PLm1@zeYrF<)uSdFx(xQW+drGLZCg9}h_2 zYv8O;qg;AF^?|&ml!k-q~D8V8#{?4Ytt9ZlG{Uh}t zHKldHPY2$@dhlfDGwQxXZcYicy4%1>ST>+^C;)$Q;jtwmiXgeW$HU&8eYQoS{`4sD zun>P3sM_dU63=!-8_q}p(6gC)Kw;3|Drp{SG>hPfk<7}ql3Ek6p(u=>`aqA2waIQamzq27QNM|t79pxODU&;t>S`^hA; zYb!vU6cI7kwSD%G6{HJx`N$|wSeF%v3ES&&${dIri?2;4Zbq46P{>#~`x)&*y^~uR z)6({`_d{{D=JcV>#)M>PT0W1yK-hvs{Vu3hel)NmT*r%ripGvT+MW$in;hWCh1rp2st5v14`5H+eqzO{JszOvoB#C{ji_bd#=%BA+wd6;S@F3T$FY>TVr9i)FhgnP2q&CPlse~dj@-#_o>vl5*R96)0KG$I6KXc8;4|dH+`7M%PN3GFq30j zeYh#37Wgg6As<=w3+qkL_e|Gi;G*=>`boIxJW)LvYDCUwOkxnZJS*H!{>@viN!{fLykf$PIu%~@*q?=j|R zG>psoAhnbV)jbXQW9Ca*012uZHO#UL{e?!o#h{WI^8ztCRP2Z9toq_5EcGFHm=29rhf)xr)RA>*eVHB0;pfuecasovt{?Qt|-6Jb?k2KUy{Dh ze%Dicw*es`y?#(kD;Hc+J}L`9jZ-ejh+oD8gj5NSEB>|rOT0Z8Wvn8KGx`!|s=as9 zb2`55!kcJDf@~vGPB+vU57HhNKqP~(DHjW>nr1A$HhK*TG;)e^9@!cOq9aa&2)TC1)m{OLq?Nxjg zV-S?7pPiJ|H5$P!j1ltPRxjL^1HI98tz-FGo<{fsbS1olaH7!80fDusT8vlAid$oj z)UIKIUBCNyJy7~GUXqal=?4_IWJ{gT8QO{iR&{F=>v4_koflzb z&eY30-ZNlFU9nt?Vz6mBio94a0?oM{Bm zAGW2ocRKeD|9FBOb}_`r&B(m3zD__Ojl?e=3{n_AY1=!=?E-XBun$uuYbo{JRVV;H z5*=I(3YG-PGmb@GzA&=tMs!U$u|K)49KHGdjX5SEq3Wy=5t>jsB7ur&eZxLmb*SBI*;6$0PMd#i?^760@SnFcILH8ku2FrYL>% z@> zsv{i;uS>VsAJbuLmLBg`8>tG?M^|IG=3NjxX-PLOtw{T;U8vt({ys)B3wk0{@8VZ+ zZxd+0@j>pWVcFg027Z< zXI4j>!6;}4p&=>M*&W5<)<=+Ynl<$hmqXafW$0t)m`XS|I|x*aw+Z752Uc*TbjZA{ zIjMhp!f^eZPYO%0tFqmMNmsw+Uj}Putu#uaRUSUledT$$% z6><6&v7_muat8cP#jHWBLla5=AfA!E$mHy>tP1?a${dqTL+zVR76D>@I~N2H(=U09 zPN!Jp9ujHh9;ctgquf7*eooD96VRy8JNZz0$i*o)&j%*hUC16g#LXrY$q;)^-PF!BGb;nmNJ(ew*OR~%jMPXO=a$5-lT>&5 z;lT&3K1Pk~pdGP1iDIY?Ex4K{_aZ)?u1!WEWH6%`0sbi9?w8u$-`XX<=e~uQhyUPX zK^m^kjHsGARTe#z**Tfd#3-81`Hr_%;}f%l``6vEdwf-EV&6}U&sP)=3h(Q7y{uPd zDsuPV&v#O$K&2WAyQ{^LIBeqc*0*4BKl%nz7#joKx>>dJFfi15=F&@ZCD?@cj=$tVF9Ik*V5$929gdD?G+?-t1#oxIqLZ!HI5155Z7QlHk)EL2E5#P;x)t|Wsu*fTrRMSuP z&%Vjzv**%LJv+36Fz~cEEUCThrVjfJL7)GYJAzf)zgIDVHt&eOJa;XJ}#bN;cP|bE(2VE}UB}rp=MV4!7%I>A+-!yQ0P`$vCK$ z?%NfN#p>ae03?wMbAQj#^#NZrZHJq+ePU4 z4QV%sagkuW!i%88-XBC!$FGz7QG=Q#Y)VG}74TgDU{{2c#9?|5<}^QjXFH6?*@9L` zda+}iJ9Aiw0Y_QCQ;m0(Bh4x1$B@PyXlB5nJd}+Y=eQX$C4ox5VF#tOdP{|{sEvc^ zprN+sZ^maa`}fPNV}U)qT>HbgIOip#)%!eDJ|uYPi_`Zjgp~1{+pqGGFP1f9Bv19- z0ZuPVY3>@o5T=K_hge#_lFTgPuMA;Qt60W$;FDxia5jvMJiIabSWUNbfXadi)R+4< z)u@Zlg-k46rRq5U4L}XM+mMrZ_FD4fF5%uUA) z$5tNA_a$#Ws@qyyXToV)%Sh&?@8W1D!bMczOsItz{2Tvp)4;jV)X>z$oEncKH;4>u$A) zS};^X+^{I}ZcNbtn(}8%IpkWMoH5=4npNIdj3?h8xc3BL6>DI#mT1<7X)4jgNsF^ea@#0Y6Ay05};Tt+)d=6`lR zjwK~eMr5@YWjOkq^@d*uan*m(6P`53=qYuGt9;5>B6szNq7uuVKcZDL4H!98a2LGvwi!;NpQpm>P;TjS(c>#YC?bi z(6(Je-0L5g*AgRh3F2sOmoqLyoc;fH?}&55#3GB{aCJ_eJYr#S-mI$EbM- z4<6b@lSxyI7*8+bj)J-t*2=f$CxA#F8ITOiUfg5GP<+nzKky&_6zgIs;Fo}HnSyW zSNR*`8=O37UymuHl8LT@3R!S?Cv`}IA>?D3M)PP3of#-@dbvv~{g-gmQ%>Z;MqbLt z`W3{^u^rh@7V6i%e@N<%9dK8QjBM9kV>f1kOB*-yJYkuHn_Kg%aP(@rhqLO+9iizf zxj3|{Cy2c^<4BWxZ$C>C`g#b^KC)z^kv_|n=7}UtHk$7Cj@oG|!hvpCuXnO_cE0%9vdnnNKJn+cq7 z?D_C(8fY)N7}+3D*o0g>G7b_M|`4@nIPRB&?{yZdl4>9gkmn zUpyOW&W8sOocj%!I!-Mx=!k+*-CBOas?3-u7^EqLbc$AR7tE(CEy&ko#e`CBej`pitXt@I0`Dp_GjOKTd0^D`ijB&A9G+SS%`lS ztAYSJK*hgbgjqh3?SnSEc=x5wLfClz2lr^3(j#9mEtU*IT;$sYVf<|cC2=uCV91uS zZmSsx$zn*CyJ8m!3JoE$90?Dq3AeK2_?|Bi3oa_<#P{lY1NxdcL0+U}aI8e?&aaW#>bE8qwmMt!L5&Za0q6{A>Y4Jt`G#GHotAMWh<@ToF#^>| z?&u^c(f1}(ycz;nAK!KhV0@${t9!NnQfRwa3FiTua?&mEzxp5XL1_e-&U8+MIH5b{ zTsV#Aysw%PJQ)?2IoIwxb#TVuzYDQzC8go8>Sj9Pgei*kD~j)TBqSMf^BLXC7m37Q z6m1bEh*Af;Z;b%9t5`h$QrIBz_g~GV4TE5933@d-mgZpB|9tB@(Yn8P!SWC^!Y*{D zk1c=QK7)YUhLg?Zurczb>7@s6T7kMv>Q&m78LXd$Kcg}w=f-h zLdCgH`CKaE#(}8`(iV3z)zD5^6ApHfJ10Lxg^yxLe#ZW=4jsn+-5d)*z6EB5?W^>Lf)8u72rMn1Pk-bW 
zz+zL_;xz46?0IQT>5zG(<&uf?`embgcilEJ7$d!evwRb@sa^>B%KVFz#fG8ARvh1X zia(jpu*^o@;%?j{w!GxWa*Arm7en@P4Cgb5xm?g^BHZf#1NLrTk!PGR%&ZvozY_%L zx&&lcM=cp_?en=0RN=`H8~hcw9<-tpfA{1SC~LuTJ~0PiYA;XI(ny1JlG^ySV+eSLGR3<$XS4|EML>+gMz8v zGGepb5Z;rIE$tVFv3=M_XB`f{qtrfUMD{gz8js@Xy)X8S4{NRg zhQLuu0xih_yp}hMSgk|q<7~idI^uvL)q9V#IiWWGMyjn$nL46&4~SCjCYZL1sc5Mt z1XP-M_1cCJ`ACx#BYA>M<3oT$iT{vPdP(s&&+`5v3_mUH^KKnZeu~ijEcr2Nw>T<* z@bULf{TD9~olSHdBboB1z3Rv+)Z2$24KN z#q2@2w+f$tf)_v$_nY7*gG&XH;xu0u8Uqx^jE`5m;K|hX6~}%<@z5IjvnsAp){BF8 z8CJAI8~K)NB>@Cy8T43kq1gX2xedXOr)Enzp#4ZkztF`dGil!p#W429o=cr8 z%r!ZX721j!oBefSbWt20_I$pV9(K23ukqZhg^fcVHjHN|M?(5d2dFDZ#^N)AbuyG9 z{Qm*M(?27azmEzXN{HTtZwU}HhA(B(Y6pv?(YSb4?7x(;Rme=Rd#fDxp51>ZjK>fX zJU_T^Hq3<0;7IYI*R-}&nHWROXc@!RLQtTmZ|z44U9I45X@nMPV!w>83=p=~iT7 z`KV)z;Iu+Iw++i3#vX4ZLbTw-k6)t>Q2x%&xFD*;U+Am__P4Jxudul67Ve&j{tA4( z{Vrkp60TO374<(yF0GwzxIqmabJd%~*M8JknBsS7-*JQ6bt2ENz!Tdiyo`kmE(f^m zK3!e%!D>B=r~@2X-LUJIK>{lyhz7UUV880e>!!-{zxe#PLP_E^`0sMU2I#Esy72(o z>@%J|r_e#E_Uum-dtU`3zpO?{#f5x>8I<5>r?yGp!f$o9L)uqp^J+`hM`@m+`oud{^thvZamjKz|apb#920kYY=xx1WuQ%4;tgz@mI@R zRMhFZ#x=o{^__G;j5!qiJSZ6Bo2?}Glgung-R9mVahLcq(4ALH;GzogK9E3H`ZBD)bGTvr zYXS|}d7C61(|9RL`~720$ZBzvMQ7tampp8+NwAtiKrM7mQnqIl(@a>DAaHLQk8hVyq zdzV8n3=PA9oUM!^of95mEp}s$Evw6Ozh}h}QF`W=pCm}}a-v>sGJ)y%)_3wt2?nX# zj$6UkW(ohbZWpcnF+7J&gT-Br3oY^rxS)Q{+U3)o>V7v)NTN%k8vE7LEOIc{`oBB0 z4-MZA)chWlXG};BHo+nk4M zkb~(zmeaeI&lWuRoUKoU2-IRt(Y}Sb(9ZB^E%RQJOyhAz87kP>mrzBFaZR~2j$G7@ zC%Jq(HXLb-Ss*;1HUB|_xQ6{OU7aM1%%5(geXHeoV*Y3fi!IANO3}{-x;MhJ58!Gf z_uKo=;%z-=TJlYH*z8|eVo-#5DnMJP(YrSzlmZ_+ai7wTo@Y`UHyluX=*3i)g)+%p zI}%u8IK-Oz=cV~ip?4ta-*gL39#4Px+f*RL+L8a*MW!75ZisIhKr_?CRCF~E>A_wE zW&oMD=q)%)-5!}bkTd^jTU*_4pOd3ZHgxE_CGeWwHoih+0z6wxK~Sq3`fUgO@qkQP z${jJi&$~X~rYQhH$R&Vj52aT&MHw~#&8}YVp_qvpAfAl}LtV`W7RSz^^j#fBAlh;~ z%*DjjwB^cE{|vc$=2I?0L=KJ@lCpthgCRdTa1?;8$m4-75jrhCZrORkT;{Vm+X@>* zTDM#dCSGc8FO^`}TKy7rR;j)vVZU7^R!;9@**WwV1iEx)E#Dz^k7u{9e#89aS# z&cH<2EsrwB`a4j)=(zEpsAQy21GlAP>6D*FYa^&NUTSnkpfqpLm?_vZaQl9Uc|MLa zxY#@RjX)-nyh&LWRG$l1z{YAz7^NX`ouPEjL_SQk57VF_`Zq0IT&9o*4?#Hg<+5Qa zyu!_T1G{$x7##2re9KunjR4DXxe`oFN9BH1+?~)U@#yCHc*m05lu7I}*&s`RIZhY~ zfYbefHZn>OLZuQ&YoZ-t>0)UxoN?%YNKKW=26Q9K(jTziFbqMypuzZ^bU*&*>!5z@ zxXmuE^rTWlP?YU>>m^}Rp=hov^^Q9%?1+qenp70Q`LBXj29{)$z(Rn42mqrQ6PeN6 z(E^i;xj+mZ@ezvT55Lxl)KVLdW2a^C7|$KX++3v{HmXWg7bj)P7{(AXb(L1a-h_ti4UaW zBtH?8Z5sv%FZ0zo*~=sH?aJs8mOF{n|Nk!f<695``2bpOei|mGdlEQhp_jv|i=GR| zsxk{JJyyUJbz7}5)p|}n2bD2eYRmZeQkMOg0Et}S8bx$CY>voG-0e!o+|C+r6q5J% zmd-*SkbotoWlRJHnY?K=A^fPHy9s6%R2&ejZXQlw;r!6r_}OOAhnJl5SLd46#0^+f4h(Oeejw(@qM~ zTGqPN;ra|Eu}{v{(pbc%EG8BGB6K!^rTRAP>2#i>8kDu4EbU1psvJMpg{Mk6YGmaZ?*0=P|Pew1y_pw1rB1+y>rI840 zRA~R5$0YwA&*L&?{r^MaR%KB z@3L`|`5lXIzA%MTLX%Ne4ePL?&y^1|2Ny904g#@g<}EPD^ZX>*Ir?6K`e;SHifb5) zUr7%X;YcU2#-&~|kGF$bX2#|P(qT^VqI32^nJ&Nxl^)f$2K6BFvM^U2(L4Nh|RENWdNpbNl#*vl>w=-DGPw=bU z67fTge-6Cho@a(x$9D-8!10?zc_eSmH_tE2*`M2cPq2$zwNx-S&H${l)UqmFYuFNKclT?m&~6AC#VWcIk~@p zYBR@Ha`ShV0a!wkoOD2+=DeveUi<89VrR#5uvl8=?&a-$g zHr3hL+vwMKO7grP{W5W?S6u(-8f1idT${;=ivP{T+^!|O8_B|Dw_&`!Q=n04`%8 zWBnse$cAt}moP{kJ-<5K*oE}qz4XT8 zmO8O-;ng$PADOenv_W0-vUq}HWeqST0rG=O2+9d2ZS7ybC539?%c3QyjbA-PsTwsI zm3VY*lS)2)r8b0;ZGbA6piyw13e&KMLgP9vpdI-<3%ocPNwrO75~zJJjc3Xbe%ZI} z+p9`d=pMe3?$sYn{?g&jSydy$Q9Vg;;Y1}6y!C(^D+o9t_>X-vhbakCSt~e6a&692 zf$J}N)V}Ssb%J6GX~~r&0T_(nevKO{B<6wfFD4X(wM=XTAb7PQEl*ibNb0;|>W*@1d#5?%-W%`1zVGzd~<8L_)!Yq^Ln{{;Ubc!UNK;u_dgnh|V zJEnLMIL?F&zgc_L>x-~xA~%xP{<&bk->qD0|J2e?+JR!-N_EH|2Gil;QGTT}1HksKbYs}mAni*;?CThV7d;2kd`} z_^(j;Kj;g76rU?*92-S9^?Hmh!Vk~04{ikcnxnhEy+x7@pqnp?ey##y3b%p^2i#gZ 
z4jRMasLu?v!gM}5kubOZr`GIw#3Y-uNk^%JVF)-RLS;#-BmAg3bP=0ksEJm-0qc># zktu1d0chV45fQUDA1LfM3=c@WJq<4Mn_Q7)#V*EV{X7<=o^ze8RA2`PT!#pf>=ZdlWvfy!kCQS}r==I(bH;bC!`*ea+;-X;^h{IisN77eRjwrIMuu!VR zU{L--gSfdud)6lFl4(~=&5tr*u6)(w;{BAhD{$;V&9R-}2$?qb_ zmbfc~u>XJbhur|Ix>E#v3C1kqj=WJL)6cMs>vMuutqm$-O7wApCjl?+!8;qp zoCEIi@bsTM>DLG6oEqztFB6jGm$;vNKi2}g7Y_y)ufSLq4iizeEfZS=Ma$u?~P}P@pa)3&mIJVP}fhrS!D9D{;M+^idha#hv^SIWagXyyM zIx7t}3`~&ny8rs&Ia4iL7eXyINEJKI&|-aQQhO+Oe@c>!JxDBl9K9k*7%VT-bX?q6 z8NXWI_^JZVV5t^hKPS8CmB<^C7u?kctSX;ckR91iZ?ewX*uGpM@n0f&j06kC097Nk z(lTsZNJ%BNu;$g>ORs=@$cb^U`t#vQz`mTMJ;&mkq*o^= zLg#sN4XYvd?w1D2Yig5dZ>*vBG!Z8PUP!dB^q(I*s$);t1}t?G{** zG3oUnrg_v5`}SM(Pp@|43N|9?GyThd)Pri-{8V3Fi%*r-zsJqk0R+pWWUQfv$yBz& zg{>MD#?|DrN0ZAC-|^@v|H57jb+1-cL#)QPJIau@5jdD+(cs1=Klvt=Skh`THf$wl zifGV=N&4Qb5^g*=mnYciGJwAnUrH_W&z>p>^+mtV zZ46yDoJ{3R=>zBF>KJQ{Yu3ZxqTNRm*q)uLL{3d6A5Bw`&8u8Iw;fO`BgBYMTjypu z0ry+bLBD6Gb&7%%PE^+>eqQz*tC?ln1*i}680zq)8!G6VfL@PVim(S}09~Ch7LT8# zRNMWZ@#|0Rg{fvB?Dk5CgZMc={R%x8Mbxr>c?U6v&DzzoFO}TfGReR<(>%bgyQg`Y z#fvRl0kq-{Rf;r#>u-O}CUoJ@)p@n*`LfsND#uyXb{4=3bUn@!A2e!CU`*HUY%uLQ z)%jv#!@W0-F7s1gsaoqDpEfcSKo;T1YoJH`5VkeJOwj`ow*X}Ei;_o8Y>@Town z5&zs{cPH!Yub>Orht0CY9~V%6ubNc2L#(*9#p^uSbI^XU0@G@|}lCm8z zdkDgkpt;)GeHyL)${_4a+WraRkS-zDt^fnM2~*ng+Bb+-PWZp~`N8+G;7>%mqwo9o zOZ~VrgX_ue+R^hjTGt~-&m#v~A(=)le}xIAV{~^_LS9SO8B2!RXBQ#1?q+I?7@aaB zx;s^Yn0buJ6y}V&I?%hnc(#e2V!G3RBeGg+s8tABh1z3{L{!<0jnCcdLWsh(C@s33 z84$iqsz|@0o-iDwA_#pO9YD5cU!K(3mtk2%;(ap#v=Of~-?vR|Igg9w5_KS5ULR-h zDQsvMz^Z;B(6jg}CBz?Bcf`%iXxA-ve07mpZz7VyH7)&R5OQ8a9B1xb3{Lf z=dQpX3eu}|x_Ws2=BZM08ex8L+qbmxPCfNGN#I_mHZv|?d%H)r^(;3Y;!NJ8E)+He zVd2>qSJ$eG`zw4TdR#y_AbTxILcb!^MASL_dsz+T-iiVr;fETG1SwG*W7&RgK z_K#Utgo(f0e~=7?kKI)5%R5eLX5EL2a}d(_1Gf^M4M} za6_G^JZ?8hCv#nCbphsCQWPF#1?gay31JNCorg_lz1~o(BZ}^CBHGXNBoZj5pOS*OeK3iL9~i<2><}F zRw$hTc;JUj>{JN4?w=_@ax0SDHpt+j8qC=X{Vy& z1AZPXd>`qRnISMiNf{~v!sMjB>B6dH6~VGhVw}%gKhmhmLB&&6tlceQ~v{C0b6B|3|neM;Q9iMHpIFKF~93pca`_zW5Yv!g8p z?1+h;ePpG=jdR!;D=iRX-Z1iIaNqaIt1RQ|tAQscCC>{5F)ofXl%wd|Sh=C0Kl5$! 
zM9b;M?uQ~v2YRzbbm^U4IRhY82Q(6OL$r^2gHiarRnrKdU(J7lI{=PKCI(Rhk7!xYNMLWUT;zlUT}e;`@4u?pQCAN3 z8oM&ssRm8#C#>9i{3^NO5pg<_1!S?jqXAGpLG@${vP#0SJ2z?ZQm#^0u55}Vk=um^ zu1JMG7C7dY0X)6jBdWo+>#?2|6$h&z8J3LPFjDq8S6Gbs+KrwGd=ttLR@6T$*^vog zu4r-1660_-*jAF6tL!`ER*JNq8bYqnkzX|GYeX`U4iZq(u=~tRsYUvWE$p!KfV=-U z-8o_Q-mZpOh6)D|D%s8MuyybTT5f61uQw+=~S2aoCWYQ z700|cw$>>rfs1EEAv(cvrIC_Mc!{D!=^|@Dd-bZqFJi|*n)J-k^0ZTiq`v_^>urGT zAbdiE%($LVE3g!Q63#BsJ@wc zRw7NQMKhk6KI1<|&M?Lp33Ae^aZZD!y}VPeJ`-GBiOxf+5cV?&;~hN5m>mP0QDwkO zX)lt~0b3hr<|kjsah*H3>pyZqc$m&MQ6Yto(@~Q+^CrT9Jrd+#!1hu{&sA;M)N&I} zk6@xas-PW$+_TtUI4~8=HplbK?qZ2#H=aqfg#t5J+Up&~1E!%p;e}{UsK+&KNx3Ie zkNQEP>GQ8l4)Om7%-t?bAo;JboUmmI?Ll0!5JM<9kFvNBczdBil(2B)=RYbWp_sPK#tL=WSqvXUA^EkAD~D`iq+08;IV9v2X*vX919cS$aA??@bp&GIQL4% z%glKvttq=hHKhL@oBGy!Qqkxal~4DX7T2Hs0zNH$@Q*T--J~Ehi?Zsd&#+x$IL^x} zEVeW6$=gNae+9n~R&_!f-N8DcpOE^u43U%g{`4hWAc=Y}yf5T~0j}r;$ACdf(eW|Z zQt`AU@2Q^5upc0y6*mseN@h^_uvKo(vJ&=wv$IKGd6vIzJpY~o=ad}F7)o{xrw~pb z6ZN92uurSwhdUe?^<$D+f`<>7S3*QnXiZAsZYW6YyP2jN(J(3oEcqX{Y61q3L~5nP z_kUo&u>iERbeA6t_U<_IGjD~sKw=XAcjOHH^WfjCFY@s8+|?AaA=c|c`=&h*ywLaQ z>t=uxJ`R{Al zUpL{a-<7y%as#nLN#u7lxt$ z6`eL2Z<}oygn|gMnOHvbaM<2Nrw+-5lk5W{Fi(6QoYpKH{^aB1dd=l%4IOKTQvT^G zZH@yYNG-1E!Lw_*rj_g(R66G;RocbeecRX@6BsA8ti%HJe16^a)+rFX^@(SoKX5)E z8E4cKU1yGMAm-U)n_`!~EV@0TTV<_Ga@iJGUB@k4(p+K*KH^Zy;95pv`ImIUk%Y%#17>yu` zE{VYzFAkHz82hu`2tIk0UQYJjE2_&n3DYRqrH2fOW$Kn?j&4tI0R>N<*IA+$@b^c) z&t9}fQC?ssGSzHY3#B=#T~&?=keD@?r*l#r_JbouuL_Y#m1NAnWL>_P z!t9ob=FL5VLbE8>{?fI9%j?|lzbL|0c@4!X z!`h)9i7vDx|1)Ac+UpVraOg?FblqXXc}vdz-1A<4Ueh4M{pQvnm3#mid zPiD8XVS~BDX{5@!nsO5Us6~iXnjEL0#@c$v#Zw!w5ft2ejNjr4p82J%qJera>+|J% zR)7+wdYiLEy~qo2mW`eQ56A$K4tY!f00D<*O)qNh3ZjI%yAq87A8<_qGx2&jiaW2} zmtY^@8F9dwa?wk(G~+i(On;1k!$CpdZ{3qWySi^y9^xd8svZu@0TLPKeIsYDW{4L# zn&Gbt54Bx~pA(=VF+^&z7tV&dVvr`5eQ@31)1r~2i6(Llt49f|p$5jx*m{_C=kHJ> zZJ)Rza|r)SWTN47!(#p1R)>->)0^}(=?swYKr2TYk#23yr755gSI-_`V-TXkD=#Qd z%cA891ER#2x9N_-n|dB6r|#a*4$P)HKdIbJ5n^joU)NuNfA~D@a=d{4v414O&FXWQ zj+OM^rWypR;&I0ncchJR{7yAx!8BfK~X|JsO1P?vFE1P;dQgw2i z&XaH0;xJ`1Av)p6V{hmlIX{nW%4z-H+LOAbNjOKz(J4(ON2baR%5KxK-VAbt%Vsjk z=#814g!2Qjh$8e@1>%qT4XXriN?<0GVl{FOhb~g%+Xg0c`KCi=LS+ioHWIq*h9b9q zw5+(hO0-|%KUC~(lYe5!$hv(%0Fm3Pg5m>(j0X=5xtrikZpT&5 zMUlzBOp*9LvrbR7m)2dTXEv!U{Q%pR7x-hTtL8$+5wh-}d~`i3v+`87%Zx1m5MgKt z$l%CnFCkX#7b7dIBOdfl?&bfW-IDlwIH&l7_4ncOam$e|Jfwx!-aT%nFNHDf56R`9|h zUR5%XtdVXWBwN}L`VhGz^4i|_Qb=KIK%M%hRa;?)p_AfJ>q_gBE5Vd|K>{t|G_{N3 z6(-U_c4V$q^*)gke6$eZ(YnTE10fTwHvnPTV=S$g%a`Q~6YZ6L;u#xNHKh}}^`O}3 z{3me+;Nn#K&L?HGP!QsHE!jPrX7)fI920dSc)a6^5nyJH4Zdhz!1Ke2-b(w$tcoxG zrG2b@M4&G_w)(>#9r8N1f&Z`@UVsrOf{WAi+I0Z=;J3JsEXU;Pvqa+M#$su>34lZS z%h!b82{pu~Gm$*gerZx6<@Pu_+h#_HvPaaH&_9M4@+nu?46wybBy|9e_FU}^O>WTP zxbUl|*EfK%F$i-#i#@nkV?rJzhlDzz4`M{on^XrAToBDDfY7*_?GUbYD;Zv{hR`kl zW>Widsa6gL5-&(zMI|>%iIv+s-+&5q&MenqQytMhhIzA^Lp%B`6g6`u1hWKD!_PgR zV&+M%`X`mMqS6`za{M!<%TVC`qa@fwuv0{Mk5I(zi}7Ux+?3Kw=x%?w^>)v~sx>R3 zRmBH4Xujl?=97(@&uw$MEAS0Ip1AN7Vt>31?Ra0bzGHx;AA4N~-d7lmR1xB>os6r5 zsl9Y;^BOP)(V6q@zX9-`<_IoX-?-FjNxFC;e1aNCuw)7mGbzG5Z$x`CC;uo??TC5V(1HMYq>GOt940%W+z}#h(>&aE5)sQJma@!M$Nr%2zq7OO7Gvghy~rTx#`8H z%d=G3|9#MUkISPOp4_QWK>ET&iGBr!uX?NNoiueS- z75J*B-rH>YhsAtBA;;4#_rw;jsufiP9*fl{{3ciA8AWj=xQJ9$F6Wc97h&8+2{tDWDpQPn##(K?D!M3jY>_YXV$H@Bf ziGSH%v2uB#x2EyI!#;n;)+$SQoU=%*8#Ec_FMpT~gH3wLc?suqb%_ifcPJ1>LMc}p zU_y>DnecLM5j$L>ZZ}UL5Cs<nc_m2ee^2dA3C^gve4lU&P2nYtmec4NK07_ zYx0)~g|Wb(zc+!s{zqRFg3H0M8dp@N%;qwtQO~gWv_|3$*}iFoLipqH^-Hbt=y z=j~bbJE|ZeWM2Rnq>=s4D}+m-r)fXx{_PQCS-m|ywCF``*$4G7nu2@e8VEC#%%r+<*z=^cuMT+y#-_2>kdF~H=Z~Ms)@z% z^YKLIhmIK|@8_OpI@7f6Uf2s8|r~VuHg)$-#G4Abv0b 
zg-bZ{ge??qFB%g&LkoaB`m{zNf2>5ua&KDBebQY;2S|!g^4dc`R`U5RX|cK&Pa4V_ zs7R$du)B;09X8;3`E%pwQ@P^vif3^zL=$mvH*iAWS%m)E7JGUheynEXj`YFFP8fVb?q)5g}V_a($2A?(2a$!p&W^ zw+yM&W=*UT5{x-n0?)uYKk+fp)clKGuRqTa(e94eL<63= z=AHWQBMJU=YD0plk@3W<5$^#-jI%-vqWi{uXmniqcIujqBp?;Y=h}-a#m4O(uV8dc zmW<^rfFCtyBv}qdHrJYALix;3JX*Z=mV)t8$S5|?JjtusI1o^_ig*|ci*Dvz<&021 z?~B@KlP(bPC*I%$Bvg-P;L+ghJ!eqHTB?(-uuYMa^i-uE6+=FDyg$%|AB@-%Ke;hB zB#{Iocj8!6C8PC>(?3!(FCkB|KQMIgRk`d>%BP6+-jI(G`WK}|IrDS{@TL~bN1 z{JZRZZ{$$H23CSB^gg22do|dLnY%HVzjfN@Gh@^#$8uRFHp~{HX!cT%HJnbQ6O>__ z_KhfHV0rn`T>enosDvW+J!QS?phEIoByajyfGX`vv~NRh)33BRpT~0SmP6dXT{t|b zggCW?%|o?`R{OtySBRMIEmsQvX~-SMw5#MuR){RNLA7`%G#6vXbt;byEZzHcRtNtZ zC~gIgN1UxF_8pCvQlt?=JzdtlZ{4O5^r zLDXvF9J)%_!`{qlK?&04aogSixGAqa*wJy-{0d`1mMy4 z_dm$KyieHz97+!Up3;!^?|||OtazgO%=+fbRWK5M;5xwpB5~n5DoTip@C`ft9UX!9 zO1Vy{g7yl6*ffjgW$dlnV$;NiU@e`qr2cb6ta^H_H_qA8pN)E3OtLRN{Z)8&NlrWZ zh*RQr(A`Lda{F7K*;6Zl&W0pWWo5$g=sKroaaEN01A0Mc23VjB#W0MwBuR-k0$Z*f<~5gK-4^| zWk(FR4n&0Q-L6G6dy+5bteu`w`LV{gE6L z`b;DA10NP$p1nb|>t#0Hg{kyqVPq|2a5&C}sENibf7p?t-&V+o%rTNlc5=zsM6_~M zc9*Et`0h9(FuTSynV!$t_iDIZuJfcxx4gUi=D%;p9<7DenOpgT_DE}BJPh@#a8WGx z?FnLq+4P{l*6Ro6u4}yCO<~B~A}c1I7X}E9Lz@lE_+`#Dfa{-rcn#g@Q~~Y&R&qW4 zdl7j1Ey@5;f4R};JkJ*4nYXj(jch&!AIBv-O#pld)v6;74k^YF6KO;FkTzT5UMHgz z&NNOKa29Y}VMDD%QL#!tGHPf%5g){}fB zbz%wa!nUTZVQc?3Tt!|#o3;n?zyE@S)?3M*>VpHapC)X$N8C}3go|adXSB9L?gnCK z^HXGr;S-N1G*gem2evsu$fRs$uuAtDNsc)J3tdXxkeBpdLbmfqgQ|h#oy;rl6Iwc_ z#_DaeZgtu?{y{gV)WLmfkJZjPMu#6b_fETkkhYxaUg%x1L54NLb&dJjy)s4s0hKoD zuS@Qm7x9${=RC3kqd{j((qK^9IG(>ykh~K@cEd8-w7HK{u`L!Tfu(yEY<#=%AT!Rt z_498h74$Sm%N%m2;c|}Z|M4;WJr~dN9QF0eM*=Y_8bguNBsuen!}IGu-I(d3V}NgJ z4|hD9C82rG&ZQm%6`H(Rx*MfEn_{G;^_TP&js`oj-!1j<^ z;L(Fn0N-v8I0b2Eh@6-mxFkfggcuEJhtKlkkH)~z!31&?o6ZOW(5L4#TDHnwa!x!f zX*y3_wRqL`iSlm8X}d_Cv_rl9okO`R6fF{W@=|mVZjy~~+Q$=$^Ox1CVPEPxU%wv! zN}Byv!~NBRT!=X{@wFlMDbBSfk}6uaNcwrr>f#)_n_B_D?9mA9G=TZYU#BW$B03tw zDo4xib-SZnMZHc+(cXQ_GDM(uQ4IQ#(u9U%h_OE?Wsta9h8bk^5wZ*+SbcJPN9Aij!ue!Q~#&VCtFv<7S}Mq;gQD1?{*kpV zkZFQ}WJK^ytSp$6%*1TtmllGS1t{gerr=uky~h-ZatmbeLZMLjIJ_5mqZ>G>juI1z zTK0)|T)?%KDB56QhygyB&kexRiSJ$T+9>WP7;wHqwNUP*o)Lea{`PSpPmaLMtpQhn zk@3c4;huUs949ecaPlR0c|T<3!dbIt4B|Oatgpij5?5HdK-0C{fypP0CXUbXddWa! 
zSug*FX8i?f}B|4hUEpP1PLt zblj(LMSoLU8und!#FbPbT~oR6+?$dk4qIRa6u+8%FNvpsbfk!W)a(W>S!M1sV-5ou z&nRXzBXj3;Ml76aX>0VtTba{~S`v&D`bP305Dy0B?5PJHzWh71tly4}n7 zhYdOXPN~i1g#dQ3J8Hk=uIQDuHyggZgo5|K_68u8AvYR}ecLJjkt8;2<&I8dG94M$AV=j$m_)ZKW#vv_)(EUL44<%r;tk*au5j9IdYRPJ7 zb_$G<8^K!~qmcE}`i^0?-RPJu0#!@HG!p~@qz(lqWQq0;yCed+Jxs05>OTvBk zX9^LA!RgLHhaX}HO@Dni5$+1a0!VLn>4096!oo0S4nnuBZeD2S9kJ7|Dbz_oXQdpI znv`Nn2ot>w=epf~Qxifj%Jh7pobyD6CYl@pzTD+q)Zx+qmc6Xt)T7^^kfLK3V7`R) zCPldE)q1qfhb%ZdT2d1X0=5~14(N~sRu#9?<$~`jOU{)epI+USV?sLG#{UhAPmZlW zvVZ)=0Dsd9n>Ye2(Ht z%P(u(Im!Dl>>VQ-jZChtkX`1NHWWg3ZpE4cmWu)^a(T30TlELxVDPkKquD&B&yX3F z?+nlP`1IfCF62+m8=#n(R+=#DsZ-NUdt&y4A*ue`gB9=`mxy%qC?L6X$(02-v1zQdY-lSX#|+nl>zNuurY~ej!So zDEdWaXFPyfcY@M11ubC6_5CEnvJEyfBLg;1-rpcpvK&Qq0~Gu$?)uGYyo*8fedIp{ z%w2c->EJ#E($%F{!<8;>-(nF0B>7HjAc2l-#eC?{nt@U>bAspKr6JEO*~W?#3pZWf zzZ`f5`n-j`9&6oEPSTZNB5<7xE@Bw40p5^Z_6th%jl1z2#=28yx_8?BbovraB7 z7A+kg4+*tm`zOP>y6^U5$i1#We9IuPl#*@;^GoXwHr)NodKz=TIX7jNAy1qLaa+yz z_V;W%lX)wO5;$8GkCtTqVaZ|?L1-Z>JiM>{($YrUpkmZXz(kRP#jqQpXK@x^yoDXo zAgff5+LXjgQT(C)jq!Lz6zrpIp@3FT&9gHF>zr-wqYM!4na3exAy7)LLjy6@ z&)fGAS*~XEV-~}+5~K+K&4TAQMMuY$-c%*nwtCh5lGCP(&hAD*ouJ%A?;KXg>YsNn z!s}r=te{MS1q^USLYlJ)830E>xWBLJKoLz2_ zD$!YE`~7h5aLv<0<1%$Xu{lDGZcayJBCl4xV+BJs$41lZd5=f9KC(GGbUeFBGkNF- zRyMLMXk?IGM=_CJg1pVZNFY8a3qvzvOY`c9n6R*C!N&{LgoZ!sSVobc8SsLwIUP_?{IQEv-_-^-1g)N_(5hu)q6Gpvt7iT)(S!nhl>o zAIWr(8}uDkHbv!g)-%rHVkB^duf3(aF&g|{PU%^G z2<5EovIQ_B@d}rCpx7pNF&%Qv@ZB-y_)=qEbt1T`)g%Uc+QajMgZ~qdarQfYfRX=| zQf;dP=Im?0eQ!YNmJ4&NC`&X+A8(>;V0}H!xOooy+hz7dW2;JZ8typSBS{@ylK9}~ z=1hZ}l`Kg{(#Y%Oe!z)eM00gj674UeuMHFOT1SI4+*OTtB5xynT3`WsfxeFkswIKc zPRyqX$#s7X zZE}dVMKCh27y~0-B%q^$Q4^|Tm?BukotcLf! zpJ+6MK<%}{?XGjc9Nz5?)lvzXYs1ABkD?iJPcj(45c4W(J@hdfy;Ovjn5vMY3h1}i zS@jghyq%^?w9bkQlN`qcVc8K&cOeMrLy7HgZC-;e<7@@HmOkD> zDi06_L!_Rn(C~!mYMlv!H2Q2nfUpUfBSOuE0~_Fx>w@f65`VJqI>Ns!@LFm7~CS@iVH?_p&3{>`E}CUCIxdz)b<5CbzOr?>;}w zqt=&Qz^r6{PN+E+3<%!Wm%N$`9|G-j@(Bo23_pJJ|A><>!SazJ-1R zeRg<8Cg3XIx~3oqBm;UA9q>d+R94;NQDQDD9Mqeb=|jpSIr zVlLcr{^;HdZm&nGx$nI%9Ukz-n@|d-vI)3c2=Kaalg08=T?~BPSW&H5yhvPbcUvRf_&`qN_BTm#(j~` z;56ru{}?yM)3nZmhsk-8Qt=((m&_(PHJWbxa7uJZ_Dsb5T*KiJB)v^!(R7Z<^y}=o zNtF==nPAX@o*U4BKAMzwCiOvPB!f|O?w2m8)+<-$+dL5clsseqWihzMT#7^jz~!xY z(kxBrzdlbR_#pb>(NKsUGdk;b>J!`Qv91(etvcvJLgd)~&k#4@aL~}iG*STZ$2>+@ ze*vv@QcnJs=7kzlllB5_r%gmeMRCO+_BK7{!%G70(aOdz( zC6yFA5DiV1nm!DW8n6ynueDHZ`zy5mVlQpy?m;t~|GIdw!+BUO$81Jh<)Ei}+ae`;sTMWerX>6_Kb%Z9LLQ?rMbKMTNrv|4A zP{J=>U06QqDZfVA3y_sAUM-(1krVY4Z zwV>OMZLUkrdjNk z@tz)(6(73=-qwsrHD||%p3pn}MnPMBTy2&^wI4RU_bh@8SNtC*uqshF-hgNMO4z6f zYTa57w8aj?j8N;_huwoc)>(KXCmYE*o-aBrw_B7XJypt33iMoZeZ&5ZCL13wX4cyh zCrGNDU!~l2Z-A$J=A?|zw$zmSO_tRKly9Z)ZBTjOlHHErKiCgzOb$hkmP0*v>Uc2a zcGcD*@)4d>UC*UdbvX--!un|q0~<3h^vy1$%}AF&Mt%5Z(}6!aI!Pk%Qi5~0A%RcZ zn~0U7A$BDq`rfn=8jh|*o&9_aXtsL5rEK3y&I1ouJ7OkZbNB9_-(rH%7`ato1WjQX z&RAc}p|XG(UYMj^eC}9xT0daaDyQvbR$Ax=>!|E-lH^cmlh1o|u)^`n1L_#gzcPP! 
zPvo6*xSxWhOe%&~YxshLP<$_LH)nF00!ITM)I+f~{waQ8xrNcM*Jkh~7U#V_MJ2-% z;y*$n13oIAi8@(qBY#i#o60ulz7l~$lrV095xs8@C1Nf73;+^C>rpJah6}9Ty@@q; z+}q8>wN-M82Sh}yRp5M0KZBqDe;vtLDYx)HeaF?<-lv?|(8%}*D(LFp44Pf&17no# zr|D}48$RrZ@g=PHE8RL^5|@H$poTI2M6)#pO5*-(_RiZi{Fp|vE&TE@2=b^Tij@;7 z=u5TsS7~Yl(BGk47S``+ZZwZK`k;_1d2>5VrIC1~B$Nj{Lm#gq!YGmYY_kbDoaXX) zskTum5=E&WKnW~(Xr30%RU|pFL&&$jlJk3~IB8{b78r`_Y@<7_sqE3?GFPhRQID=g zwx?LlA~JfcLYurt(6fg=wZENSyW_{WJlo=c`QgvD!>Xm|Iwxi~aOUc!-a|&uy5((k zLS(T6dNmhQq4<33JVmctLhRm(rhJ3l6s=iC{&-w6!1=3J{BFlwCfIAzT43t>-S&Y> zhyUaz4aoM~4oJd29f0x-N9KG7<(JWYeU=UMPfNw1Ai=!NFqh& zfAS=a7@yD`Zorj7Qz@%o zTihFtKfh3O32Z@Ui69g@?=ajKA#5o$K)6__iBA1}j~7>VJ}sGOJ3d9_Gs_{<#H|0K zwW+EX4Gus{FU!fGABUJ!#|YrZi&o~lpx_%x1FWWy8>3x#_VnmZ+Lw~lEA}WE5gu)~ z?%G>E9!{SJ%-PO-_;TF4QKRZtq*Z`+CoLs#7OrsuHfbcZ{6K)JqiF#}RfH>u#G@Z` z`ALQ0_}JAN(Ci-)s6E!y5|Oa*NkRSePKQQAqChtHaob_2YYzGU?il! zN3mL{AcDQg+ob0#ApSDK%s%==H(3zXG&S~W{V+LJ?6Q4|U=JwUvQd0R3UE5udO@FB z4OIPcTJC|dg!~>@E+-p2Jtlw@NBB-~W$MuudwL>z zmg#Da34?6-`U?#3=>B*d{_0dbtWMlmy|>Y zf;42hOwh|_#H;O)WYv7fPD9RDrIXsKwa);Mo=@x{uSxu(I9v2zd{?4*w0VM^SK}t# z9yoP^!t{3UXe5yIh?po#A@pa^^UcJn!`L-iR$m5MW)p1~)P8|<98TlP&uAJF1&rc#-*z}OV z7`}mvJQbUrL?Q1?TEGz^PaNTjYsV3uuEedRHP!_g?b|^TiUWA;T!^|!_yMaaqg|KW zEhGy_KFyNd?|2Ni{eEA@)isRY_4W&&>bNCF3b`G_dYt;2t=)*vgaPe^zqbKq-Q}-E zy)mki@{Cpjfr=0g#pK|V?in(X-ScR`;PA1w(v;J^1cWpF5tfwoN=L}*FAh&G&RtQB zu?x-OeteDE8|V%TY(ZiYr0Cp}^cRr_of3_@Erlnq_KaP1;KfUjChFRakm9S}k5R*x zRW(*EABjxV;ZN&>NcrkB{=AG?GzBXFNo~MkpX51z^dvrd?CwcXtRn36lfepQ$Avs5 zW)zcSfCu$D19w}MSDyY=%$w#thY^5iFU05wpbx%{emu@~LaHGnI*(sC(qH6Z`K)ix zR!S{K-RAhEJjqy`Au0iPd~V1l7Cbcxft9@`s#q3LhlmMyzlXa~_z19?gkR7&?>r+u ztRKw>9SHxiX^(WGa56PykA`cNeNTVn;{R^)dJx|fH3eWcd~HlokX5_I2o7}$g5$?I zT~V#&o6xhuA=j&fajACzGSfbvB-cT>6(5`E5GmRps~H7s>*HWb9Ok z`i27rkT+@MxNUHE2~F_n%oCp`6XF|*l^jpMN1BvKk8OrtU(mfC2s2>(7k@37?tq(vmHsmoU!bT8>I|${s8>P@#UO{*V76BSXJ|y zb*^5PvpRTbT}jo}5+`tsm{tGM=v>QSwu!eQw(&ytI!F-vLE zqa;~W0BVyI(vgmLDRT>;Fkpav8y9r$(C>KVuWe-IV{$Z1IWniLrc0_1m!o2vT&F_^ zjvd6LWz)?FymhjN^Pp_I#kDGBR#`M}yZ=I>^TOC_ZbNKpUe~MJH;GR;)}HH!EW@0t zGUS{<>BAJ=XICOl7Whmd*xf1(<7&M#kV|rjdamWp_E1GfAw4g&7udc50SX&;XMzPQ zyx|ihHxN)N%VtpJS z6SSR;i72ZfD~Mylo<1F4eP--@&28|^0?$>GH^<2i}g`2m5qrYucs3;Te95Yhvum>hx1ABP#o&7EZ6!#frcIAMWQ312#f1?Px7Jwipu8iiXce#K~klZQ%z>x6#I@YfT zgA{CFRq+xvdwKNb?Qo71d*4 z!!^imnc7Wp+A~&dIK)Bwb?l7qb?=LNDLI+lSe z?rv|egqwOkdWhU29jXA*x&#F6-JeP)x^k39A5EG=2o79Bm*y8pgAB6@LDza z@XZg#?Lt_-hgDEh1elZ#SSdJms|KVvyPZ&I83JrMVH@Ag_tkx2Qm_0Q?Jv^;Qz_7T zj8+ohVJFeq+%tU+#io9y`N1{n^X*8sZi4h=%*vwve2b`*nLbGEKEq4DmoFiQzrLsC zt~?My-=p)?`Q+nl4k(~?IJn=2vz^ypKB%V`?NhXvF(($i3|E#ZEsO}Hrjvi@p;Mu5 zML3gLf@Y8eC4itSEUe7>F+s^Y(O8@dhG|&e@@Vzl<~om@JaN!eK%%h^au`ywgj#~J z>^%cwi(*qa_LsCT>x*8s9&c+Ykx#(YE)5aq+tTo^X}?F^5j9+*_U1jCTVv}Ep95tc7iA{KwCjHg{%lqGPj$LR z7ligJe>TZLKNpx@NeZpWmXyG;03epDa&wzepy2}2Gxt2_3$dnaz!NG6$MGtS%JW;@ zZM-|f@{YVNW`8x>+qaVUdDrJ6)z?Y)D=}Hv>rw*X%voENiMSqIquUk&i_rvzi~P5$ z*)S>!7_N#Xvur;g#4nHzM< zfIbC0MBCpQ`PQ-;mF5+({|JU@}x_l(E4-tA@6dHf?klE1RApndIZCz;nWyip-dwrs)(zE_=pz2MC?# zOxW5Q_uUJhgSb{XRCQMaoB<7?p$Y`k*g$eQhZ1Fj-biYuS~6v-l|u~#D>kByXmYWH z0ty_M`3hr^2wwuTuu8!>`K|84FaL=Nc{!Jv&TUj=YK8FF2po)!@LReBFe^Ht*LghS z$h7h~u4IY-KAjmjK$X9fi;uV6x+Y?mKusM1b02~*I#l{wJrqA88&I%_$_5IhFe_5) zD&7;9%qMW`0H)qk3VLm|mJ9#46}ipQwYK04j50Uq;>sW5=$34=U%HuDb{Zp7#x075 z=L>d85s6i@q=r2D?$G*psEvJ3=v5AmYJ~E7QsG%<{X`N72NDBrDQ-8iYlio(5_ID% zJCQyJ{&9YkNE$$@i+bF~LKMNwM}W22Wui;4B9IlzRy?>yAbYhDA*XLBf`wpZ+-0PH zU$Qs4N?^;u(D-p|Ha=_qgC}BnX>^KlH>pfOq~sO>2F$<6sxWe*T%EiAKW#zysfSz! 
[base85-encoded GIT binary patch payload elided]
literal 0
HcmV?d00001

diff --git a/media/so101/so101.webp b/media/so101/so101.webp
new file mode 100644
index 0000000000000000000000000000000000000000..ce65e94bc2ad2d76835f286c32b00e129b9251f7
GIT binary patch
literal 133522
[base85-encoded GIT binary patch payload elided]
zFUIt)Ro39@$ia@VPCkDMM>LC$yIw*O`5N8`Fcr^ICH-t(+c7RL7YfifO{Dd)=AGI7 z-Asahfz<-{cIMTy?fqLF@c%`97#}zfNNFUDfcsr!kpF;EQlxU&SQBYvka!Nu1s@1G1psKAM8M}B+GfY zA~F@cz32b6B04E+lcp#I)ki_<$63Za;KEkP0`*9tF)|Dtb8-;&wk%&!M|{ULC`!vL zCwcE;o!+w2PP$~tt5W_bPqCRP{8+jI@P`Rp`jxcJK@q{i%@oaAPcgweP`nMuAkO~y z2p~}PJEy6MjWryeh%0+k*o;*TX=qZ~a_8c}!Jybug8Y^qu3JN!eKM?}YV54mMv6Wv z&G+wzyF+-I*DEJ~*_NJ(p2XSh0|iz16@9(nXl4S4TVpHq&qb+lQ4E|n-?jA1Vah!Q zxcqNyj0e?)RY;s~*fG=9!KO2wPvp-S49TH%L#)f$WDwyu)|#NGyY@b%(DV4>z9qE> zCx_K@ql!K&5jZG<9|-VCl7PvJ1;y)1*VSM!QpWCm1f}nf-7Q3F1#g$78Q~hpTQBrW zP4-PEF)fjaWAi`(NB#oJRGW)N+|kV!v*P>UIa&3RpqbatQvpHNdlVaK#8XI^xk<*H zrpukM0XJS?XvM4~A|+}7WK?>ihyp0W54>?~x-eoWvJUTF14T{-F)&4gx{kC#F=j7e zih*EA1PF$+8&Iz9U+I{Et!I;IB~Rq*YpMT+B!yP{bcK+yB8;HIOf|&E!G~iJNvRp% z-?#2*W*yeH=MI1Dq|AgZr}l%mSsxh3e#{kfsGzJgE8>9>GxR*vU20XU&q$*h$MD&y z_TXMEb7~w7jraUBez^akFl;HeAAF=3XdXfCtv*+vGC#gsw3?$@1(FYg*^XYBs7iz( zfu0vl?v?-nPme)q>x0a4k>d@N#!P-(VI*{s!zA>Be82#?o$vj)Q=0NoHHm)IWV-bC zIrx)ph5VfJAVMESE-*bCgQ9tFx|vsDMP~m6rsj=|=$4VL14LX$!633QXXw&U%3UBx z&}nqyuLmI3%5Dxf7g zF>AqqlC^wck9syICZOVW9F7GY{R*!~)0l1yWLKrMf?u3=b29NZX8j{f^MzCjcTJw& z^Rs@q=~T4FE=CNG8IKl?Z8any1^H0C3>~u$q(%VFS@stfX_K99(*yciMsJ&cJA*liJ9@$jcMU6$)2y7RuDl6q(W6zApiBy+%Xt8ISbOn}I!OmH+2 zH_cXqwTVEBrsPO9C?p~98eAkTGavT@rA5Cnd^K!M%WX_u;c0H)dUY>4C3TnFJ>hap?R#l$3>{`H|o8CYp zvpA;O#d-vV+mC}2Ir@d|O^g_K{a2ME9lMjpA1h;id~&{lm$J$C6-lbNeAx-0N{pfk zHRrnN#r$_5-yO_pa0bdn^{K(R%clAV-2EC@pAQTBjB6_Nd|Qo%*8g6k5&N~7{oG9o za5-3Bbza;L>Q#;xVxqYUS&xd99}tvbzBp^>Ko!ZMU`7X(0g-k0*-sa0Eh()_E1_E9 zK6QT8h~mas@6B9hpZOz55Q%ZF(dH&jGk5!ug%WaBJt>JF=gWJF(kyk&2LU_!z3@nk zaL7#;q#)pn+WzsKFW?#Mrm`7DPu+!5tz6*At%DpB8_jWa#=uqcdlhWf+7T06TZ(p; z{9)j?-xDxIBu1nK_JmCvYFrWV8H?$L$!q#)?!uF=?N z&e{wr^5hG?PT3h3(g0sbl-_$jO%0u&wNoKqb~dI2!A`XW5>lo}*5~?<6M_Ac{`ae#||8TFG?CydP%@M02s$sJYsxwofQv_#ania6ohcjLjHEgEbCVOPgfZ z!!~;#Z=k%;O*RbR8zTP|g!%y~x2ke}jREz}|6b3^q3*}eZVg?C-mIv`<~F@}WTeS* zo+h<&f2x33<26rpoyS+bjixBPa57zs#j?TQoS=bu)A{OM)Y?sm5+MYb6!H*Sc0NVg zg&nN*663)gZj*sCow)JSlVrtfak2b7X?G~LEPP;^sbMYLw7+jC1u9WX>B(7)ZW*p& z^8(aL_+KF-L6XdZk1H}%+1OGMawa$BLEB9IU1%R*K1~hCORh)OK~yXsykxBsjEo#( zyuK*4DsiXm7k~t;lMiBQm0p>4NMQI0M!&DMW1KFA+$aglYJ#xXey;XH5`Ee}M?HBL zDJ+L&km4t5ashD;0qb5Uz*?x--)3{3i5{tRSRL@v_UDm%Lmx&MkqA(Tma5vAq~J1W zV3^mn{{o!ml;6Ebmeb#!_PW(LOnBYkeWuccp{EHIf##3J7yh`>5Q8nZ3t?|4LDq^< zu@VFm!4lC3rx0x?0w6bEE;T8W(29P=+s=G-MR;YsTAjvE2Kwm+%)Y4L0g4r1;9yXA z`bW*(QZyJa@{ z|9hVQtzUle;0$IgB9$hu{{vzH*ub_+JadNWOV3LRas`;3!|8AncDp9u6=vK|&*M9n z#ByStcHd%e2`aG32MfSNLLZTxV9mfJB-;1B;o5V3x9qHGemVsWSk>T8vUyn6f*CYA z9;V<kQA@kP|serkGvT1U2f$goL;FRMkD;A=c z39an-Y&WYQzb8$=N*gMFPFN{dr1ImdU=Q`4?^M!5pU-Wup(PeoG<-*nORhH6p~Kx! 
zT$!P(Xg;uwM3W>y9L8h#)fOE%CkF6G2n_d{Fr*0T1sc@3;S1lQTP|@JZ;~CYKn(G` z_^6>_E_I&l|0g3J4ImL6N^Cs4@+^8Ox?Erd*T#C3ueAt~NT^#@+0AOgbO2oK8(A5G zF%s%FWFlrjw+j6VpK8U7*6OMW0Q8WdY_AlNx*zBIZ%jTh&ca;O94EGpR>Fi8j8OD(bWm@&9OsV5z;lp7ksz$dE3ou zOWjX6LGUxtke?&jerf`X^&d3*K{NB}#dskM^UENs7VBmb67MuOzl|4-r%B248JaT_ zD@?U3=ar@CUkcugO-Qq^Xaq7uc|P_t$x!&;;ipzwgq)JJmh_0qYA==|U%1jeK^LF{Fo-5h_ic_0!3s!)tT+O;m^duSdX*ZSYhujMO3A z&@rW~YQFhn(g{WBqoxAfC z>L6P%k-ATLBZCopzN7soCGn|PB{apzQb|TZ5`$jc!c?btYQlh_fkewl;1$=lv(7>a zDO-n)3qQni)f2n{PiISPaI`@cw_>fPC>~i7W|tYMQ1>h&r(Nf+gIi$n9KxZs{2>*T zDhKWaEi3yN5s43$F)}yu1L`)aI3cdSrKlGp1`JAyq8gpd8W9Av`|rvV76R?>-wS8;e=VOHp&|+YP7R z)%$}y-}VUTbE8Dyq8Xl55bkD!$~40Ji%5RF+cn(BsY_sD(<(g<$&-JVpt2 zA)g-CpA$k*o*>FCyA^VKm?td>FOK_u1G%NdY=gDQEC^R0+r` z_g_k(+9S}ZkXG4v50Oh#5SaVPAC0tCq6YsV;XyQ?t1*_8Kj&G!Kn7D6?5d!MJ3gEC zbm24xe^8ZI;;XLdryEB@jO7lM6g&nZ@$W`FwUxzEj6`6rB)gw)gIJ@M3sJh~&wzPa z*~@+}9MOhL%z}r6=Io>5LTEd2f*yH!cpYVJ1Kq6*;zUyx^X~p!?LlAsRV%@7536ll#!r6{L;JkQOiWGYSsc~j0`x5UCy3*y> zv!NcVVBLx-Lc-=>u1SpCwa%_*B9ID$YTwc36Our2h3&kyckZ;Gx`P7v`zQ}b6aCk5 zg9Rhhbp=s(Uapvk%M(@s-*seZg4zjO6HT`#%RD(_yE1CFac6jgig=n3&||P=178LU zK4DbHMsnTp%SGY7+f1e-cGp##>tJ@*cTD50BWa)6N@l*GyzM~Auia{#sks{ zb!FXSMqLN`#edPGwKdiy&+kk~Wu~1)t!1Qoe9Vvnyb0jvkOg5opSb4YtzDpyZD;LZ zHP~p`R_rzDl%2ol8~xtZ?4lI6p3bxZBRZhlpFOaPvSM2EN-g_Ki%*Htx}Sn;OX@21 z|Bvc^XD$;Zs2POy`VXQvLkPPeoA=BC4nAhQ&vh699j`%RO=waRq7gz^{xzH8QZVWV zHVYwj)m5K8X-{oKjtc?m+H9wc)$MdmE1fOj2(Bs*<0$$nFovMYmwdZn>{#}(NheA@ z?r!MQjjTgv-r173g2y^n|8t@c&qXGo8el22+~Ua~TbB()aEl72Z^S7;Yh$bDN~1tW zsRax?7ox*1^ii#_W?UhWeo>;HGD`#_MxU`W*)VP3VwnCScuyat!2_~YjA-&~ zpA?}j&Q^))l%D+egWBurTR!46Zygx}sfFZa3a{7DBCo{K?>z7oWgyr3GPTkuppMiI zAlx1Ap-#wD;lKtqK9A&PsPn|a5tvT_cL|q9dM2mFX)qv(ARcQ6TWj8or$I3EjmXeP zo#^$Le>Kj3n+VV$7@nP|i$Yt+#q+}?y9vb}sf!)uiQNqW zd^7VDu#N)!fgAFKBoIb_FaVse&)ADW^&Z!>g{Ts3F-cLTUR9;z z_fVS!@Wee$cagsDyhxGk+^JHk=!*~3%o>KR?8Z@OSIRf&*LVZQrh<)D;W46MCuVZR z_EY)#_=VC@#rJh~1Nx<^H2G%dag+~~VqIvZCi1j}-=gMWU6&6fV@H<82UZZdE2PI~ z$TG|srNyx%#QJ5ArV*TU412^&Q}z_El7!UNj^^IA0OI_sjGa@`cb7E6cp;qxiC5!! 
z0pNV;SP}1@xo4$Z+8gHth|x-VB}xwCM2W2PQPtdjG_e0Pp4T}mkagX;KPLVu48yQh zdJUyN(r8z{7UseES3mcWXMo7;c=_9giR zv9eIZq6>j_1L(g!nxot8lx(8?KW?l5m2EXr&iQ{ zX4@Cl*;HPwJC0t(Vq5~)dSw4L)|0%w+OiMWImT_`MN_;+^0lXn&v^Drm18;EtE^+# zN0(WD9iD_n&n1!B<6mqHgXkgn^i^xglOl5b}%q->>4K-`5xR80l2Jy#y;oGr-v$rV}ufL(c z@w3zExcU+&7Fp0@7QCmnhm#jTT4iS+GW7_T&$$an|?aK;L4`(g*%>oCbXL zj68?#CxmBEK?ic;YW@9061dRk-%H^yud0gUv=ft=oeO(6vi^O1z*W9ngc+1m9PKoK zB|ubD&2kKc1JplhuzUA0TX~WK!7m*?S{oi&QkZ(P>>MlY?T((k-K|&`*Y~#% z^hb>l3MywnovCq;1+T9gYykiPARB`tl8>0 z2dH!x(+VDbM8T~)g22Z9S%zE14Ft{IX@O2W1EZ*TpWkdSZi zJBAHtqx)mwf=-eWZ|tD3;2dUJ_IG92f__(6e+muJH$LNoXY>O->B-J25vGJZqA4*N z5$-p6>r4>9+$7pAXH1iQW(^U&2X}JuX5OY;t8JiVxSdL>mlgG3LboE+4~)9RqJAqD zS8-7=#pfiS6qpc^QlaHz|2)`t0p=;ABYB);mHqU&O&_3JsSU((DFM9X!eO}ZiXnuj zqWR?J;4iPLRA+_1$J~{<y0eS>juJ-{loWafQ^4U$(fUpPW!~SX zpn<@JO5T7jSQ4yX12_8@CPgeAn6Co-lV=v?1Vj;%QTq`k!#-k+QkGMgtsgT%Kw1^8 za8Wz|6wAfxShx9oQW`CDkc>3Fh6WHRvRU)H$G&(XBy(AsMs>@dEwdPVozZo!SoJ@f zC!a1xO-z^ZXG}&n4MO3PIDr9Ad36~w_mQ{~P_xRTeg3+iAyxRm$R6wHmh5_-X0MIr zgoe#a?B21MR|jXzf$hVRHr!EQR$c#+uJS@H-TE%0{5C!E761t`8i5k#t8)EjkKDDo zp{q@-q_G)$i@+X0@BP^m?t*#mR?PU44Z<6VEMXbtg9hta3`ZKqm*Lac=Y?ug-MsLX zn{}G2ow5WkbBiK5gIC@{z@d(wHHn}Q;$A2G($E?`5nLI-T1kR(m+Sj}zF!RG^n*Q6 zGsRxmeNtrOdsprn9MG->K$+Cq3EUqcKvxUKfO1YX;vicvP!TZLQpyd@a*Ukhs7Mc> zQfe?kTZYgWuLMc*?}Uob6sA%o!~C~vb@ef^M2P#@DK+oCkYMV-YY_~10cbWuIQUcV zIdhL(g_S^nYf_ochMG=fHlNZ!6Z#r*jih!T`|FWg`nCL@$W@vs-i*Q;l=c`XCR^(r z7um=hRJ~N;`_#_K?7;r!S-qW*zx+lPe^?ZVlZ>d@#OLfX1cF2{Soo$*<}EmU9?ZBMjkaqhe_8w@nC?*jV=rJIHeyoQ$3y}$&ZR-FN$e(kdVdr z-Jxu0c86Ts81oD~D56`=uVQ@z6u}0^;Aoo6=QR9TMl8`J8FV=h_s;0=(*ogqOGDrB zX&b6;jj?XkNq@d|f|LoZo$hT@ZAts#&?x2>4YNeGYIWifzBz!D>A1Xp%jRyt<7 zrCtjnCRD2%Xd`9-hbFD*{?ol$mh|x^qZ*nYDM{BjB$~)Rsj$j|OZkLRUH&4s+~mbX zD{d_@*dA=v$Z#n)Luk2Mh{_!G1Fb4foSkMstVJvlQE17fy!0_8^d`#huIuPQex$v2Oeg^oCaai)27?uEy6A+lzVSe>^Y!e*b+yoCMdp z>&uM8#xyfnVV1)Xt6_pl>w=;u4_Y62nMOdY!@l;-cQ&Qky#=TN*c#XoHeK zHAMSa*b5iO{JxZ(Ihs;VJbAxJfcY?cyu(CVAmG}UAyL{j4c|p)@1z%j#k}$l3yb>q z{||1mug8viIU`vE^BI*5#9%1q1ra^l%{S7qV7<;{lO|Ztg0DZ zg_ZGR-%VW=3KgcQ4%}UnQLxy4pUZzX>m!!-fG2QT95gNYd%jH=k%$J|;rAOTSy1Kq zgwTn$XF}egk~y+|7aByn5YKFSTG>bYT;>&~x^GuZ9_j9bM?x` z9_2|hnzxtcrQX}Or+t+_jmK9C=QHhGk<8a&LecE1LA3!4uA%a?Ab4XdJD?1!fA<@ZaByfQh=AAzYU%)PMW=1s2|RyDwh8k8gcwh%lA~2CbosDFXWO9A~#o-V3_=x z72LlJSUXUAk&7b+w21NJMVzUjBlywl8Ky`!Hb>ayZZhG>7ZPBsM(_q}`A{9TCdOP8aXNG-0Oxu%uB~lv1jibH2wbB=6N+Q*_4R2?TcI*~0(^|C% zRafj|slc*ezx;KN?UL{oBmG@G?}>)(*w1jrg>BThM8^U;A9ZKdaT5^cv$M@e^Lw6r z*RYq;6UgLS2sP?3ShE&0hP-mij@nIlIhy!+77&k@pzUmX5z+40{p!-KCV&>dflV{D zSZhs(Rgmf39M3ACrrpYDWu>zF+3gH;cE#Ra`SoekiJy^V$?Z6lZVKcgq zpdD~TLp(E=Q4cK=)t+fD>5hGkuMP{pT}Edq4)Y8DVZpJRjpNhkJkt z|AjZSbX1*0UGh4vdmGh&PKF?5Euv6t$VYceu`eteJ==%X_{uj9g7ZKfPg2DpD=g&@ zQ9xA6kuNRgdCTJByg70?5G3O_GkS=Ogjo7Ap~NQ5H0+HX15E=lE5e8d!3X-SP>-On zx;<>70LpV{+L{I6Dx+rhPl=0x6ijelp{W#d&)e*lHjDm+6n*6O$x8%b>hXB3uU_Qc zjzxm8#?xqz%YfYz6Mn>}@VxM+KJ$GzA|M%)|NrkoMk?aJogjW%XkIsj`|!R4Zj%gH zU7zW(8B~`1lZ_jk&VZ`1V|u=Y#pVtEc5g?&DC*w zm}<3B)8KP1`$e7B5R}XBhjp-Az#022(v)C9i=PlS0&>s{0`^b2!$CI0ES2N3JLI-` zSXsOv)KF=!BI_lKL`wN^Zz(i(8t=(zI0BA2RH?@~OrqiKH%hAI9qikSaCb8FBH*B> zMdPQCDvkbMrv25qJXbOG0JJ_@AWMbC+56M2RoUP?ov_m&y#m8a%Us-&k+co?GptXS z44Y(m4B}s!h(DLad&_z3{~`mAoqC3$8<2FJg-`Y_5~_|XD>d65cT?S@G7jV{BuA^b z)FF7%gQ>-VoV%W-evVMrz7XExO%`z(tzilJ4HuCtN+=yle-v6_3E0t|zx4pkYABBY4c^*RpS!tfy!9Zzr}_NqVe zq0_9fzhn#sXrm=GIB_#koGN4nV;Rk2(d8++f_F0IGdJNaG(R5x)!g`C|6JA9qp>DX z-am*|fhhb`K-pYqhTW98Vk)1weHbr6L-*ir2-Bxt`5Km=UzE7iPX2qrU1XIngog^_bwdB;9OAl?qAe6i(kNeLZm$*Yp;smS*iv~@ z*XMODf-#TSO>S;B?sFsXi)M*eEV?*1KlYB_JTZQa_=S}{Wkua9tn`t_7_BtNhb3i; 
zCxHW;hb;NJYv1oQ>8$c`mp9y$X*<5tv4B2dxrv4^4CVG>TRm1!WE_Y#U1q%jQNpO7 z31YIXWEHceIp&w#D&0UZkeB#G!_;MB?=8IyB$aAH@IkB!l^<(yKlawyAc~lPE9XsY z12Jr7lYn?dt0ho7GBj=-GCI1SwVSHVbn>@;X5bIb#!knuqvKqnBw_`f0mw5qzJ*$e zQ4-GFqmrIh2}mVAmO&3YvVvCIB>t8I>?FEqw~z~ed355>0(~yR&zYbZV77=NX`q=y z0-}J>0<>C7(TN4jD-+~*vrSuNyOuZQsI2)ccpAp3+4ZUJ6A`vsh4x>#&EeWDbv!)2 zu#QCiR~hQMu;(019Uy`rD>#JuP=oLy2``}9h@t@cwDUeR%4(DCd#dl*PY*$@Z#!aS zdP2aSm9E*Fa+!ny$a_IgEsjegB-g-8bU#%XUt{{`92`Y5WK->~&Xy0V;l3m-WHn(ysK70}2nsd;KS030`d=zJA2zw2 zhGW@N7}Q>EKhvaS-+LKur<0i!jwleM)f0VCY;7#|T2yr_!0%&rCn)PRn&<*YyQL_AMhSYKL>NRohciyR(88pfy<+X@tok2^JZM{;R7>*26_ZI!{&;` zVWDxoz{0YK*XjnOAtsQMnGr6nkL%u$tkU1w38kbX#M(p7-f=X z*bt0h&P5u)^frE|9&w6|Ih91Pg8TRzEWB@hUUO)!lKwt$l74fk=rmRcI#{n$m=J!d;2xVClH` zs@+Z6bWX5`(SZtyc6&#yqwlYA-l`7(=zdhToT+*Y%`l(VoY0FUCxAgyTb ziI>n^vgZM<;Ky64)dxMo=S;;xw@=dt5_Yf>y}$wK_Q-{C*h?2Zm1xirmw%-$HJjUWN*9|Z0 znybod6(94uAy8|ifCEMnyd%a707NizKhNClV|$dswIvfNO>u5Yj+T4<`t+Ai#VEq& z?Mo94{IIUCnp;V{4(F77Zu3T%CF&A`8e%x(#m6q!Ny*&y#Qe{L75tPQMnRv^?EL6j1RK!# zx{U5mKIdkGO|GQxHAfgS>HlFkC*q`9xHCW}^9jji`hu!57cVzXJ6!s2->a3$GwiwK z9{WK=_Ky67T!&{pGpyPuD{UCwpkFygOsgfjsiQ(p%T|&Ws$UJ|NEZmMX@mOZpwzx=pxWKi9<9A(WElXA{=ejvqHmxXCvt?v79{^#WA0PT^u1W*av9rkdFCJ# zW||NE_W$Z+U`YyV7FL%wdrA~MqBh5vM`6(lb<9T=$}DWiMez|+8Inr917Jb>KzgN4 z`-)l?b)+z8Pf`Kfb=wkaf^3%RijU@7(Opy`no_6Bq!)4dl(!S(l_SMyhY^C!Zi9H> zqD>XrmTvFjHMpTQ3V`3Q{w!Vp!#VaET#-qaC_5we)P1$f8E4;=uj~PQPg5lsu)#a? z6n-ZoS8N%I?AhJcY1ZY$+z#dYC??q2-mpe#?W4$n^n)16N#<(3Z8X1r#PO##M#@~p zpJ6~}P_Z!=Y$f{-FCGCs1Ok)526ZrvAw@EG%q<$c$&o<5T`j*7`bu$gYlRMFkzq}G z+jhv+4y%i4?k&y_z!DUXWoKym0KEq+juWn^b+y%9eTHW+^4+ODnbQOEL~~c&SNBZt zirR2>j{F2fY{t^bvLm4*F>xDN@P5~)f$05GDY4&ki`lZVeI0y@bnUtLJ)ocEX{>B= z&nqn(QM$IAA!{4U83)J2vR*1Crzi3a{rDEg<+K+qZ+n}2E7M+5Jn!=_y@nu_9aKn1 ze7O>T-16Ib$q&nQZgo#U0@Pib1`+ZAS<9HOoq#VQ&QU>2jN&sXTS@gA$04z4K2c9# z4@uEve+^eMEyg12$5k06H*E^B@J}&h*^3EPi2hQouU9b1{|NLwC(6|BzCBG>5-l#* ztBAi3pBi1SV?H<5RkyfWQncK#;ihbX7MNe9l7`5)iCe21o-u-0!8+I!yeLCGgm0fB z4AdPG&6>arFitiB#*DwWEDx{1`2TfQ+*2eHhq_lEN1+)489_ov6=kX_E|~z zj7xuL<|NC+Y|BJyBAfJW41nF_*Ah5Skd*MOK_q0Ld+x;!qmTLC)`@6>&sA4h{=%g2 zC%;n%&|xOf;e%8Yg_gIRPTVA1n(8r8j4s08;y`r_*)5{9(jti*v=|~+7W!{6`0Yo1 zQ%ql(2h9L-!HXgd$Y(AHe=2>2eirrS^{hOn|vH-oC5-%Ajx7pu2sXsk~=koON4W0 zMouv5dWGs2M-+cuwA}t%mRa_R=-Mq?@pF_v#pV3M@+hQG72q~+fB@*XX?>lg%s#}| zvUmwmzH{Lje~@MIuJ%9|M{xSH`tY_N5jN<9g&RdMI7>eIN3d`4IH8TKsS}-RxLWa; zUC57RC}-eBs5iSuuQy6|bo<+FYRm^*PLjjY{@VJJhiE<$X2!p?u?nYrw;r8VA5I-z^~PLDtmWH~N{e^*USA!Rr&?y$+HcKP;0J0i+@qpRf(S3JjgaIuWX*=`9==tZNy z$uFWwfs!Z2(h@I(Sy0P>Q)SbNwGCTU-`W=S*jT@sPl1wbr_G1fh7Zl&P17Zc+M`H` zfFj%%TAhG42@g?9g>LMWnBLaz#W!=N5`v%;M~pE2D-RfGlzMwmT4>m!SeONF;3pec zUu36rIJN__W`bE#e?nuTw~CfHX2dSLCYF;`>DgKp4+;>8y`9o)`*fPy3;u#D4)oB_Mdb}+U7^I}+G9|00Z1tQj~5+|MfwxGT23L* z1O$BwZTn35tkc@^v{(FTfOMHv@inbytSaSl;81R8&d^G9rZj90v7`jaAiHM9+sejf_tG<0%$OaN#VES z=Y%zVzv#lX1y<&T((@tB4Z4j5ac~d8Pro}CG$FCv0_(qeIYkqpeZN=I+%1utxGh5V zzm2q$pv$b^{Yd|IH|{tZGz*V5)&`HeP|Cuw$eGT?PUDn@0w-9<8TrNgw^8i&(Xl9` zog|GkJD}kMO}X7!DWc}N6vB1iCAFH^K#6k5s@;TS4NEJtW=gOc_z~86*e9b^ekp`Jn1c9;I4WyPKb5l4HZ<6=be0o$zB5240aP zkT-bqI+`&;*71aajFbz^hMX+|h>`oR10-@Bn?L?HZDzXp7UzV^gNzef&c`HwE(ol= zX}k$4t?Xz@O0={8c0{g`cq=8}Tcn~KNRTD!!jfN8-*`LV3lg#24u3&eDzsExOVD;d zh$!sHImO?g^AWAX$>sGD7PWL0t+k9XeDMA_Z#$$Di zIn}-qt5P*znHccfs|ESCYU5v50s`3t4@v93enXSP(Z5vE*Hdvk2tvMCBK!9us{f~9PUDjhT2IflMHm4_}5tFGKwPJOgTfAmApO`l%HHGC#Zg+dND2P=g zjho8#c1Oy~81K)O2lFlI;iC5+YeDaQlsaVwQBn-g%Zbp@9f^J zT=c{AP27a58cz!gTf(SL%{j=7831;5BlHylR~N`ek{DVKY5tme>LR}>)P8!ChdAbk zdxKrTcA~@@cbjfuql>V%i7}Fcdz|e)6&Rp2_*>(hJeM$qm4k>MEG}~x+d>-*qcowf 
z;HJAzK_S)(Yi_jHxr`S{72Q_L6(J_uQ>SqwbV-U0slyr236>jH<(rjsr+9Ix%&4_Bn8irA@>w0HKHmHd3#{kW!|v{pq6T|YG+$b+{k`OJa; zh(<-@QP$YgpHW{429y><*iHtEt~LGHX^+vw}GJIzG359fYwX(K4 zHgFhvbBmmE7BXs(?+r!CkWD-SGS2AVX&dVB=4kk z8f+lS)m=R-r~=Sxlpnxe8uS?IBK=F|e3w8PKC^G})FQ3%Jg`lB)>r>WCLf+IA`5}PWeth09LdT9Yew-co3 zr^k21`st_H{}T2wklRW4@LbIoRkgdNP2Y|T?(8ltanY^8|A{diY$?ko@1yxQNOZJ5 z%y;V(wt}#F+wN2cuePiNa=b3u0>7l!$%q9=$&RsnyJRt!5p;j-`YLawo?k?m?|gX@ z=m%%nL>|w~k=`G-_1v28Zw?DmFE?+1{EAbFv!}s!O^N^wzg@&K(uWE!%eyx{G~Ht$ zagnS=S?awUvc`@C!wKH;tXymaCzkQ#QUjw6a9n)#7DBWZsp^99SARpPe)7D?|Ns-$YOC)`)f)4=m{I zc7(mn#Ash5((O4vnAQScWzF`7t>O9l7`^T|pa3}xck;H)sd%y^v8u%vH-@??@aR~* zn_^@g+&wQHSt>AXoCnfD*KBIfEe6%vapNaAPflM0HqTCkYWA&)XiFcluICPzrgMSB zAoZ@q28l+6zj6&{DssU}p%t*r@(Q_(&R#9ILGjdT3+9KD4%E$zEp`jD!lkrgkVl_g zeB=~VsJ%Qb?0N)NUt>8}-NJvoJ_B}+D5BMrly%pYXEFe2C6ov7|7b=Fc;zF@8qu;y z1c=zP9$*CngMT7)zC+aM>E^gO8};W+JaOz^?IF|9oxzFA#gPAVIkVdl1oH4@#N5{~ zn)R9F?tIE?C0iuNo{;|qLe~2`O&S*XF&`i)%_FjMOJD?8wQ0e#Rl2o6d7lO7IMUU- zbE^-n44;kh02r@8z(6;y7CI6JK*A`a$DQ$g+_Zm#3fi+475D0x7~}?vUjF=M*LKwN z9>m}K4*i?Aj;ok;;$;iYuGq;eh=V}YhmYJv9LSfv;r)GJ(KdQ<66iia?4!8|mW*4`CYjLFdASl|k(W@!Q3@imT z|AF4ys=-DKaM?wwo z&i9;xABHmoWgj?niAqwK6)Cho>+)Lk2+aj|dDjCz5BYNv9af9*dG+47;E)@syhzh~ z)+6ypGyXMscM${kw^R5VJPUS=T3;e<`Uzc3VbMTj4@JJ&jdYC1>t=DfK8*vU*)HWD; z4PeU;EfR=`swD&?>MO&@rLTGJxl{mN!eKaYDeQd|dk`69Rr}4z#Q&PuG z)@d2?%pVLxTxYj?#}a->fExVnm9_b4iYYNTXIU-IxzVgJ;qV4Q%oqDN6$BGbb-UWa zKPCr;{Gv&HtO270+Zf1`O9xp)6&_PWoFX!@@j^>H1+MJT`Q66lJKZj9wMV5il?qrx)#PGx{pQ(O$m2?7qlx4QW0L$#( zZ=Pftu!MQ~Q2q?XaK}V#1}Yu~@h_A1PIa78r=&K)A#$V={rI+2{E25tokp5%?4+4W zQRtifJlY*fvi_o3H%sTnJf2&=i*uAmVA|Hb2PUV`pj&zUWp-qnB*y&M;J3GqItwc( z+X!d~pfWNAG}*%gdHJ3efuLtaicW-^-i^|t1<7$0#ltJ=4g^beuE17{`3HXA^=Z%&7mRrey+$A&NGnw(H<8l9DY9_duO)_!k#G2a46K! z#nr)2m}5I6ho)XIAdtCuJNjvglmHbWTW_M`THjkpGXRLljc1JLKAe8T$MIPqSKvz- zKuAUuX*e~BYM6WNQ9Vn4wp-l{%%e3sFwPl8K3d%Vrn*0Sj-5QsOZ!(Sc4T$4LRGxR zBfFMa#tml-h0-&#BSV~>fKMDuILg%%EfB=Cr!}^zsE&$g{Y3P=WJN1Jyh{%mwloa^ zRsvT;7#K^DTsn`~b8@W5S%4<=BaZ|PLGIT?TIQBWmr#w`@v2M6n_6+Dw}8q7nm8Kh z4&{GI(Q9~1Yi(U-=0uJ?hpmsE&nr$}HOPM6Pm(kRpP%gfwEjZ%49L_*A!WWes20%} zMj=2}BKPviD2&R{Za}2#bq%>H_+GUHsu*se?}57^SJ@Kc2~~Lacf>RIsglVjw*V3@ z-GI8Mo&0l8Au%Yh1LwwmlgI3yT$vmzKJhUYDc0$s88kxOgJi>QJ7ZHi7+&y?5Y!)l zUNcZYP#)2~^}B3DE3AmVDNwfUg$0mcz$phGSj;L`Ye%r6xAtkeMif9^m?rhX7IS(+ z*;ecl+CRTOar#UH%ppE3;XSH~zBcM_Sxo8POve715R9(OD?9ljN8~fRD%GMojrRrp z7g@%H2&_H-o&zg#2Ohz9%SQKSqcLZkp2JP;)ujKOx9T?}^mIuvV46OK==1+lcVNYR zr=WLCblj_Z@MyGVEvi*WXe^?dymoWzRSxGM03jvrQF&~WUSENHraD`B`~p2}?0p@e zOEO3V^C0FP&81Wu9=7PCxSvnaIi;Y%f)Oum?hXSvc~K5-n>4re=+|)o`I$44?)yKb zJ1+jd{LG=%m}EE|eCzvdd^Liy^BJC0_rLp2DW0+FrraAHvalpTWG9phJ&oiGM)>%$ z)!V$$KK);9qYa!528F0)l}W;NDee?$WyV=;&OEXsnxJuKknB(ylg7l8D2@9nK7_kX z=@X1X-u-KO>)BZBn7ye1BW)z8{vFcEnF5w+g7m%c^Xm5f^&7y5ShTQ7Hn0=TSf4!3 z_$bsXXFCY4^}#6mSGSbd%g9dLMOC<(KIYT}tEfLRPP{ZEQ8yphmS7H1oBk7 z$E?2`cER}a9;>W!lf^&5M=@B=Zvp+Zc(AUgM54Oc#Z+tL_Cw&j=_?wRB;E>~5-KK& zG%9sYO%7G?;K8M!7?xSb-7N!P&;O294k0i-Q!>i=m4R@~$Zc;fbFq_fs}Yq*lq{Y* zzg}A-CPpQr47&av$*xBzO0{D5^Fv10*OpO3gT2s!)wnL!L_0UMg@hKOww3?PS>cZL z;uz7h!-Tgv>$G1qhr3lv5vwtLTWZDZUl!eS-LikxqTJ7zuviQ1Wf8aj$qNc2p@9z!YuDGyX`)3U9PakMjeaIJQv8A3(C&iXUajeY z8R#oJ&9+aDc?{8!=vOY@^opCMGD@TJS|;x&B5@UME5At#NG)#5AGmzs5+x!Fhw3Htho($BdAoWBwvWEtK(sg zGXLTXV6QHBbx z!Cun`#bq!f4_M4JzAv&KNLcj==&}?OpM2VMRP%;}z%W{GY^;94?K1aguG&k_m_o4? 
z>s84k7vGa;_u0h+a;|e~0$0x@EFQrC0g*h8#ma^4CTXf*1=nE$Z{Pf|!?E6FCDmsK z<|Erl^>O`*o+H?U(nD_nd9~+G3e5mIc>o0^b;5c_D|R%>E6_VtS{50fpSe*}OveGp zigPZ%OByg04KzP>GOcC~Sq`OcaLqQ5VG*X>t0a$pUy06InOu+;cQ5Sjm`^&H*g0CC z?i;caJu<8^Z^5qr_aYNIl}68|(vO_kfA>@8rl%k1BcH{MNUX$aVxbBe9@1b+5eX{^U!Yh(B)b{m2JDmY@mY%8 zAU@alE5%)Obx-uPo{Y;Ub;5y5h6UVY`kG+vpRWyu2L^)W$@V>!wtj%w8&pIAD@%mX z@+C&?pP`yK%*vp+Mr8qQs4LM&z>jY4ozi(&x@uzf+CYQ&zb9qhH}%95XGE@K64#nA ziIA{}l9QThO`%!(v-4Tj+utW9o`3pMcQU(s>7+35IKz#5u5<$8nRNN^(wTOm-<|u2 zrsMCk?&0(jUDUCNH;2q0pHyl9<}});_KZmO z3tp~JW{^nAL&OInjo>F4I&f2f--gu1`aKb}h`6TX%t30=FV+A4VKUfCuzmxP1($EV zAKj#fb5U-T#=MVSZJZB>Y7`g` z_}JmSv`X@mvr>fB9%ut+ZB%{-LlHTZo>7!Y4h5?|*X5Yo|Bcp?44Vh918DkOOHkIJ zrkQN89>UGtfkb@a*4G~{70c*1b_?`1jYXb#zdq^^n5*=(9!QY5)8hf@+kLYBi(AX@ z`-#JPp6tB(IAP$$z9{>TpUke}c*vp^E74h}NZz#Gxeo_%Y1zwYI}6^83KAxPltR!* z(aRP?EMycgc`b035Dsf%LEpo`WASB?@p0DbaS)3A5jFO~7-V-IA)!(LHI=0tFHH0Z z8pDmmjwsVQf}+abx+qtKb!i zV@TmPHcH=qdbW|!BuF-xkB1-%zzv9X8sxtVRT;P)tWFbmGu|HtQUUFXR5C31G01|4 zl3sfBFKvP~I!2et7h*&h_dXdj^i|m6_W3na#LB@yx~9Ds?W@&A2ZfJa^1*d|Mxmug zb+5}RGHTUbk!=^KyeFZF(&uW$mwI)|2_5v}7m3*Z-uDaZV(<#i(%jhWl{W!S0=9YT zQ}zC0gae#3|L|U-So$pxK z!on==%SZslioo1PO9d}v%zO7kCwhlY1PLPKX|o$om#u6dajsitgh5n>{{3W=si`^q zvQEj>v_8+D0SI0e!q-FvJb47wu^$U`9#tdv4_$_%v5;+aaDB|*b+Io(%8~hh0wWKq z92{O#UMnv==9A=JiOIo840eLjQ;%`~l^<7CnM^;RgywfCGpP}%8!==dHtG+USsoMV7U^KAH(c&Q$c_q?-jw%^tNKpCg(>rQD?KB^2Po1w;FAFW^id?;s zhE2abfJhpUwFIq_DLm2dcm%+_0h5TU&c(N-vu6s^M5=r%}m&v4p)BLOx}dR(As-N3V(7py^EvK(KY_oOehUg z=(ieh8wMmt2_)zTmGlb}lW>=$(X>EorH`!@51&A`Br&jE7YnyL`(?p(4O(R}VHxrN z;#y*W-DqU;%sij&HMuMoAu!RhCQTOPj<_kXHjX||YyC+EZ$F0N#1yl!(?#Hi`luo& zT?;O)pUWFPMh&>>ri|`1MPIT%k5W5vrd|J7l~VH{c*#Rez14Z@bc&6l?DsQt=$2LO zPQ|R5N4hJ#dV$nuz@J4-IkNt7ETK0qrpSZ!2C6R73`m~4;wT4f(m zL%~yM24}vN-7!3%^qtRik$vAC-v~pr@?z<@3qu|N63)cEu%ZE0(r5LRn7*m$G{)}+SPOj{9t#oON*6!jf(>6K z6@(fWM5TBVqrJ9RoUorR@y^|H;F$`QCWHl(0bnUMEbEDg3jm=C!lzuK^&Fr(PT;!d z?@~0FzGz@OHb;|5R%Q8<#vitlAaEWXd9dFo9BY=|q}k8nS!*=q-@j@Sh3Kt}Q$ugU zFyZQPLx9QkX|sLkZAa5i=)p&EK?nSWa>@Lww%wGq2A28AdgD#As+asEy=Sv{Bifag z=ca!=3ah}QO&uMYLrK5+kNLL7PyQBZojK03|)m0E;x@ z-4BC_R*wt02+@BL??V~jF?~CnaXNRV_fwu|C2vf?M>=I zk+-W1%d=2sJ}BWd%R;!fc_qWx|Air>Z@bN~SOZxpp7UHXHsLq4@Y=AF{FsBJ5}~tg zh!^=yRjeczkcIGertj0W7$J&2(nA3INou;P$bO!?`NbV$4n=B~`@$qd8lrs|BfFSt z&G!IsWH65unZoU_>Be#Zy(&C4cc){%wW;I~bOJs?$Kmhfd3uwT1JQtquRbQu?)MZB zHUcvc7zNf6G{**8+@U1|+9GKba%-#F8@4iZGDl>gUdDL)dHqFqHH#>m5Xa^VaL_9X z-QipvWr3$ty(GXwtRy*X9F(oZfbIZSU$h_fEJ;yLG$>Z6IveVq@xD0kz>W+n^|4+u5B#N440Rg9`lXDU z2ZAz)cyVu9m%C_?qK}k>&G>aY7g*e2PAYH~$rBik2NyZcTzBIgZ3w1qhJK%}ur#t$ zd9KiajKgxgZ?1B@h3TgPzmmyi6VKyfIW31~6s1t=wHrfLpLAXV*w=iW z#WkmZa`x9n9b;S0d7pt%?dHaEb5$R4DdZL@V#>yf`b&m6cCzx^{HSTi3&?vd&QzKE~{Wi)0$J#gjTD^FsuYtU1cXE*n6FT++Y+vYv{ zH{k^Axa$zMB4*7mx;^+ZQmA@mR)*0zF}@Q(>XSp*>o=9yGqjW(Z}=G> zdD~yy;`}z{jyhk^FPG&wS(LTR3(hz}<)46(JQ+C@tdoncSb^Y`Vij{Eja1*ci&#^< z4k5+d{ONQWwW|{!OHu)c&=_BM_}mRt>Xvn)^Nvnmx3dmp{7ID#f$Ou-~xFPYyFT(p2$p(5Fb0DvZt}Hv{64T0yZ2Vc3y#sD({r+NjT!9Jt z8>hJd74|WzJw63pjre~B%nfpUn@=CopCkl}1QW0Xee+t)*3c1~pElkOe-53jQ!~tO zA3cj{iV~|BW3NZ)TbyzCe>d9ZnNd{N8+*G6l@yCTSRM-yh8+nriQIFTL3jda2PXro z#PtZF;=DDBxkF|9C<{xNhcbvG&WEoz6VU0ne=0f9`@7eTUZT>)v+cZ*EPj=WbLaa; z4NJ)p@Ua9bW}d?F^0q_Mc3;9t>}wR;9Fok=kcu$8kgsloOk%eM3Qb6)chzl4Or1O- zjp|=jC7u+!K&&6Q_Z>h4W~qamr*Qmc>!YP>AFzT@6UKxvi5zqb!H%wqg%<_ZvokVH zU!CZii*b}n10#n4%Uo>IO~94T7UOc}Mf`Jgglb0)XSgXc!aOYV^G7O6m;$@2E+b>OUi>GVwrBq5UYE(K zvfnE~|96l@t-0*q3CN=l%V_^5@?7mC=9Lp0D^8={Obf$aF8`&qg-x?QPE)N49#5KE zegxZ@IT|2cxl<=65Ltlab?cQQp_0q5e_}hawiX;Xz4%$mp`xtDy&}?aBjwC71>95< zL?$5_!74-8q=rC>qa(RFa8E4$p8Qj8-#EC+A^(tql$TH9JY6tI$IGenXakNvoa%jd 
zx$Y_;I<3i3775BTsIfo_2xNm_2mSnf7|FnhPjz;G6#!Oz2kqeGy?}>q-Ne@uye_JW zSmUh*U&%Yi$;WmIGeJ%M61u z;W-sGm*yTE+Uw=?%of~)lgd~q1l4ow40g7qIU%vfZ=ol(LR-L;6t4Z+3|pMK?{Vp= zw4^fFgznJOJC4EJ-_hm`4^B`8tz2lXj_T;Bl06sR9B}iFx5ipbYg}lu`kei^E~9uQsjCAK83HOEj#_!nU zOXQXWR1xaBA9IaHb0MBFI4gqQ;U|8=omOsV-__>g0?CXsLlU2*#;)+f+KVzf#jbX- zUtBlMAmNROsUZS*BV`7-EG(F;&thAt$XLgdn_lpcMC0Rh`(R4E#ovu%^1KAxHX}@@ z(uC0_Azv$=>|({>MamY3hT6F~2u7gz>rEg9Oiy0MGh=%zc?3*cqWxb8(nBj~=^}7x zG!F8{&H z4lHz4>s?N;^g-!0sD^Ybh~Jwwz#prhmIzsk5Lg!iplL!t(2Eh8Tuy;4wJt>^FN^dI zqzxrUZmPMdYVO4}$&+7xC=dxm~%3@>zsFF=_61t*vEO9;$w7>)bc=}~=sd1l( zso|ttr+Da|MCc%jknB}JYH;K>&o{M_bKQQ3Ec^|Ej33- zus1&*%2McD13F+B{#)V+TBM|VKB@SN2vtnwwbQ$lh#ceL*aN3A2X&)6(?ID{o-QB4e+be!Wv-wTMIR z>zIODv*G;B{SBF)tVhy#f-cfA!BlC81~)X2C@R@uqbdsA9ABsbzjV^DI-RRrp2KCo zxt&sjHx0KQBunFHq5NitC6d>Eay=B)U2PvLj`wx`<}wI6LeX#~Q2ZT= z_^TqL|H25KOztTx({*<{s6*FBIzo~6@!U^tAtA_xq_XY!CAEIyol2c7H-!OJ{9!Y< z(K=zb(ax_+0Sqx{G`%_|5Q|JaQ}EaV5+>*{9bXC;1AwaNybSia`X3I#>;U@j(C}O? z@FNa_Ey{c?6vWW)%fHB#m7SxBt4ZE`cq@`K{%u;5l$uu&VqWZU|HsaNlMxwZVZlW+ zyhflNxs)CIA@8H!Oz%1%2&pyTJ<0Un0z!sFu#2IHiY=@YecGlO3Q!bYp=|^$Y$wU} zKi`RbTG)uCJNKhDF~h-W{9~O5(Gb-%vWsWewID_+68%Y za;?RD_s9jE?FSz4S|p%(eYQZ|E%=cbW(DpRA{URwK}`b{*=MzN zV?zV*@pk%!S3dru;S4CjxFFdt(hDgj7hneg&k+!&a}gd|U1#He5bZFZ*9@u{)};pZ z8GPrFsIv<{;NmSjlX2L!4MDtKZQkMp#ffm}uuvVsTtG>&=ghcW-C#O+AaRn{*}mbgLI0a#1cBXrd)FZnQ4Q7!)&~^NR-sKW%Fj2KRnbWS&uY zJ(XThGYB_-jn^m5bxFE|Cf}z=K?ZC2BKG7Z9P`UcgVB7nKI_F*zJ^{582u*^&8$Fh z`uY=N&;6#B!VFpJxZNU&wV@*E@&al9wnv_0^1L9Ea=E85{moL1;&Ls2^lK^(w0@`P z*KL@lSH}0cgKLfYXJXK4W$AoCBVO)=(EKLnZ+VP?RazW55F|sF# zyeWu_b8sk(T}r(82_8gOxlnazN7OSWQcr|DsjAdQ$kXt+4s_vIcudkmLQ^l6!B)=3 zW=mD!7x2{VJ#c$vFu3lmCaIftepXzx4MbNJ5vmy@apSsIMVQfc(mZYrk5E;s4_Om$ zAGI+pGHdIk_?#phlP1hxmQGtN-%I+n<;x~%#zAPNDlV|8r>`0`Q-V&d&6b`bl3uKq zhkkUNQ^2+YJ3?2lZm?_eNjtXm1bxL{H1cK(%nJpaqtF_W7L$k>mag--4Tdjk@r3#C z%pkc#K~Kir^k8=5IcF1yE2Y5s25*hnziggrPAg<{mRjy%4NUf+2A9P7+x?Z(NbC1g z=uNDLiSFWHd86a&THgU5j!TdpG$mtK>Mld=^4F!g9QvVB#xwHN{BWEzP3}YdOZ0&@ zlp%SM$hA~$-&-GHzf!fqJd)kquC>G1yU=)*i?(2MfoQIwK}(L9O%giGC8@_wjQU?- zkb=Y>!7Eev^jum|;;f=0MCv9vTkl|sJ)6D1&^xu#Vi=u>bc*mR6Xz!j-5<(GQ!&H7 z;oz9eEF>;V759_pbrDw*a-SVP|6%K?f#s%py{aHP;Ala((SN*%aR1y3?}n%!?OIX{ z;7Acqx=Mg(+kYIZrbTLA?=X(rg`K9>|BW1xEiVMzRoPb~(w^bxjnUtbH90 zay(i)LHy4~1fg;DikC}3b{u}TcshI)mc9(GBbZLv)O(sM=vm!w$SN$OnpD_NSpd8t z&TS4ydBF&eU(UeJNVFL$L*qpH0Uv z^qConwE8ke=n*&Q7oPp$A&hs9H=JsNGdP5aBpQSotMW6W((B_+SJ(3o)9GhFgB@G5 z@PX<9yWKd$MAF~Z@#(iE&NNV>!o+R`4f^_*E9CA8P_*1SOa-(^jEA>@7R`NVf}#VJ zDy|Mku8ee(S-2s+gpc?#-39mm<_D~BwVA3}+JAEz+Exg*l-@PW_((|9w<6KTVu;qc zP%PawSG6-iDhmH00Th%?`)xW^HZHG(c*?O=ku(^_kkgYeYKY@sT_sk-G2r+!ab_1! 
z4-96A`N#=m_Xnfs#ImD~4fDEC5=k&Xu9EIr4Qy*msRp6w_HaCj zmScnFNOZ~+f4W-E^KRtOWbdk6Vkp14>$F1(SfQS0u=x!+GHr7`3h9G?u^-scdoJX} ziKfG*06oyO%{_|V?#4NI(Gk8ZniHpYjeyyZQru3u*uftBuS2%T?JbL>j;Z;pf zvCO8ct468;Rzt!u2P2E9lmDT;fI|MF#dNKRBpDO;gSSi{y$gTkllZ0|dtT*)S32Lb zeH+X~>pO^CBI!#AQiOon5T?(FcSnm@=rY0P`Yra?)~yt$e2s0|rUq_B4R-Z`pOO|k zOV)>rN^H0_?D1UYXeZFAe~IfQtNs0RCH0v#s{gK&V?R|bncw(ivnnCH8SK0oW{+wh zROwge$mOg-Pk3?sEfvZnH(gE;PnZV(O_efS;m0f|b*){wcU!Dsp)&v;h!&>V{TUlo z9^Q!#+WziLLbbFbq@cb?7j~eKOApIHTc&Xek}_fQCFJZeBKmnqGJA9ALP_aIYL8C_ zmY+;VLDjWjeQ3+v_21hPG#+I#tc0k zutrKI6_FQBgOL{`wDjlex<-wKA<~vly6DbQ(BK9|U#1Buaef0K_3S5&gALU3 zHx^=VIXGiz*h`!^lB`#-3j<%CzD8)NLyA9NBL7DfWR_4O=iz|lA3aTGER~arQOmTB z3Sc_3btCmpJqx_iHb?`|6viyOhHL(;f&l7`x?c2M|5la}>%p}Wbq86v+LFX6bXjSF z7fN8XX=AMDPj@iCgH+f#QgU)Bnn8RUVCP|axS)%UM?0q`bsmlXcex&YPm)>wO+16k zD!KRB?6YK^NjB3?=dN2!T7OF?1=;Jz$20-{e-1BQl)}j2CcG-qY>!(*A>CS-$MYPh zb)Jrm-=~GL;tYCA{eQK zoB3V*-ohcTKjf>GAgf&UBD8-6x-_VNUQ|4y?v$&8h$ZoALh01-5QeHK-ORMG+n@wV zN(q8UXDz_Py>pZm*0p|T=ti9JEsFF0^U&p08X(LYfWk|8v_i@@kWIL~~>g{VxF&s?PB5_p;sXj=_jojB4aPR3R`gvO$DQ;;>viHq&R*632=q8wVt9%_FHZ8zoZV zrdnS+sL`+4^{33!AZ+Yi6S{TOIK_S#=IGc+d~k>MJ-#pW(ohdyv*m{O>Z1=wUS~3;k?z*XELnZNDp$ z0k6ubL4eO-K<5IV)_7Z;Z|h&j;L9%B%JGr?QXKbCj7jU~6&u;MASz$05mB&dBIwe$ z=9zn_@U@TtNkF#0(v-$Nu@O_}K!{vIEvT-@Dv1}AHIg}zW5S72m)4 z5}zAP?hni3@Zw7X&d1munxAKEqz|0OLL56zi|Cx(S23}y2|51c4$JCiFez@+vBuPO zTI@`0Xw#U+S~IE5J=x}S6ol*5;BKgjnLjGvtFhGBqI!rdN{cA!i zuGV*FUG;6R>}3fWK33j&8y6Wpro4+R7jxIYZHr-+bxfcsrV80GFNiBf76f;tDzZcKz^+)=HmgV0hOLl zzf*PNs!lEDQvUhYGO(H+Ci9?XhU~zUVL2h>%=IX)tMJA?*|EnD>uov&6bMR&A zfb2V*hz+RWMI!z*cG;zA{ABnalx^FmQL!mYM6;DrE~oQ!VX%YT9Y+2kO-fLSRyW!p zuMs$ST;h2aGXU|FzW$zUiM->%ik&FE+S{=T0XnREmA!KJ5E7+Ml!b{2GK_~Rs4+2?eIA6 z^ev6ClQ?C0{Ke-+wb}2|O@qky%6RQfyIMg&xkPhtSgb2zy&dc$`~FWuDxW5eX4#TORMwCt+-}^ z-hEigjT=v_kjy%H00e@oK%x3)5k6(Ft@sV^Nfz3v(E;Pumxx9VAp;U6#=J*~Dy@sz z*W;okF2IT=cBjpjhI`{VbMtBe6QzDcGOr!&Zm$Clvl5>tEE9L)+9r+M`)3pR($++o z12jncePBz?p^R+efsb|{K()G^dzsVco(ks=aH4bB+X@@51*SXrk#oJHhm2iIV)BG4 z7VPpI?~RD*s!oNJgI>9cKDfl-f;Aig`SCBuXicagzDbXC^sD-XHW?atPGO`Qr{usc zU|YVhrBGbKKEZ!tf~%$Yjc_sK5#ILE&lp0Zj8VH3l*^cxq=i@)^}24~(~vUse!Vxz zn2n9~@C~xq%Jz2NuvSNBxy?f2*{<|L?;IHB9sW1I&QZ{u=WZNfPf}eVIe6}Rf3*?I z(Nq3pEtPkLe2KsB$5DP1k?iNTu(dQ)jIbsRTG3O1X}eS6li6(+${cf}!r-Y@5)Zt$ zjJk5L4Qr(p2*Xel5>@rS`|)AF^GRnO{kL9O*$K+fsH8-Yey|WVeo$&h4|ME82bwy6 zh-)m#GM@y{LAbrFM!Df0`<~7f>7C)6#=67iz;p(RFvDsq4Mf5jjoo0@Ay_zL-`ryJ zIeh_(M+{?(Ka3ERWJVGh7^Y`!JxCtKcNOs*73Imo2P1y-3tn^$#pTkRvfeuF!dtiQ zUl%3x;j4hqN@HxA`x4`lrW@L0$srYy*==X?lQmQ+VJ#qr5o-2s8pcmu=CJWo9V?Nl zkC+KTd1+P;`fnkBYRFC=weP-l{9K8k-GbcY29@lmm@4e?jD8EEAH8vVr6?fnZhi|W zy_JSY*xjFnnEaN)IZ`7|^VAhp4lHjoT`ZRfzyMkiMml%uwH+1mK%mRx?OkkQP03s1 ztEj&%V*YeI7kFDx;O>Bw8>ttZiFKuR1klYQjo5+ngib&%uFq+5r?0w0cR_#+Z=13I z16D2MPX~b7?#Lyqx~_y!j>13$@x?u|Gnl+7FoQGZ7g24e<+u5vAA%5W!orY7@Uds^ zT%L-sm2B4OeG41x4G$3U;*lf{=+5@g9}ORSyiEh+WMHWS%LfuaMq`g0(~~lYL*=JZ zu{~W`KbZ)tNe$0|P4bjYMHL*ZWaHhOVQV)oOCh8bf#3S|A=~@^`Q2)@N@OHX4Ego^ z80^=!D&zZeC`t&ILahYbr1?2HR~RTLnEu|a_cLMi(_K~^{nXttt^~g}6?t&o*Ly1&EBm_`) zhh$@c>B{&b#3GP$n6emY~HPuoNMz%VO>^9>1~8sP{e5wLA2tcUSrAl>dtNxu^; zjyUBsC_xX%GcQ&6RrAMOlG;umL|y%+`}|=Gt@94vTAjoZc6;yb==#Bi0h?%RF4d|= z^qci*>1Z|9_?Nsn2Jn+cdfjBQ^ijH+p7Fry3A-*)gSCa8o&=(RCu6KJhb%I|Hi(p8 zO!B95p8Nkd*}6@cEtiF(6A6W??$;T*m28p0;)xG|GrIW3Xfs{Nn6+`TpmM>WKHbSgCnOFM@g=M_wEaRDFQ!+zIABJn$tIP)D6mg=2PNraSER+V73EaTrB7<)eLuXYeIk)ePHJLj^IP%zFSQii!(<%^ z*`V;juks8-BbNy8`iQnbO9R_=U;xPOpGR>{+Azi(u2F}(<-r{NY_!C6H%5J5b{W|d za+0q)>S^n~?da;sv`wQI5FhO~P_s}Yw@8o}iA7qNCgxW_{7I=85BpM2Znbv5pVoz8 z4S~o6^!NBfCuiZJLN^5saSeY?hs*ZKS_vNggL4=zB-4xD#dJMizq={DSg} 
zB>!OvL$$3!#40}4DHSse!^c&lNjyYWMZZmFHnCa%1sSZuwc^1(>9UEIx-s{7vh`Pt z8$iBSCX^mD*&t}(24lthX>TwO(uBs0)z(HQhDA(IM zmgWw;RdYTtK(m3PN@cB_xoW#J^Da%t-~{nv+JI_TeA=O;n|GFT(FQyN_N@ns4-Ab7n z8QWaKec8QVsIVFy9{19}T?$tm*aNK@RLx5)jX2>=Yeb^N#7SBrX$UE8&7aoyePUcI z1?DLlM2{?>YWV{2X=8rn`@nwJ_NQ!@`|NLT?K#snU-GX3PSpgv^02lFA?irC1DD&@ z+1;3BvKc7t{nq8I(PV@Bx7Qq7SX${TA7#p#7>4S_7O7WS0su~`d$;Zzj9?6^)#IU; z3+9X&jPiZ6@!F7Md`u;r&L?#xm}Q2CEjC9q=;1b^EL68eZ>>1mO#w&_OsA(Qpw+;w zPSL0mALU?8o6`dSjO2>+0W~GaDYceP|CbhM*5xt$^MH{$7#R1Y(jk%!5h2C6 zS!7!HCms`7#6W&4eRVN*<=DC_Wd+XBr?GN>VWo)!PLkB=N>?4Q$%V*?pG-t`iuWEDa?c&o?`mNLPB~}y*~K%5MNNG zo_$vC7MP$+C8#Sj6>rSJ?4jfqX(W1V%fR==oC#=RnQzYb53NtEFzaWkjKT{usr5NSNB5HeNsnRWTS_yy?g7YQ+y|oEY zN2j^!!vc5J8dW6#3{kJCuPYfUpN4#-tL&?{&S5Z!lwWMH7F_w_%-DZ8pMS{%QpX#b zk@Z9}#kq&Ps?+c}`_#b@ZWq=BnauoF@08z!%ktI!0kBY41D4%g2DC+!kEIY-9d`ON zij7u0vy=eSy`S3oOPw6c5^`eI!yu(8^>XXnYj^uW?+K9h>e)q&1A3Y7J#nfakx?D!j z&9@Gm`auihDw0+hf!4dJq8D`Gaj#ff06iL6$h`4wx^=hSKE(?}#KBGRCk{|8pko06 zMXf8;I1}*~e^Vlcz8OrHRSN(bZlZcN5*Pk&sHAiG&Et!dT-`x}SWu0})@PM~TkW)> z%ici&tXEeF`&O)@Y0#8j9tnqK$tk;iW>c|Aytygjz&yQ9pZ}P3?5I%+g!=e-I0Tt> z{u7L+5wRUSlTOCNNd%fupA3{`fZ9SLb>p~H>*rtiDe zklAEEo>YvO`09QIuFdKFMYChu6Pp_UUen#TtySK$MQT}V1nVJ?5dOo`IMZ^BaGchV z(qPp$_gx9eG;NvQBzm*#mhg?t_E&eIp^rnK2rLo5xrW%AT6tW0O>$-{gyue~ALMhA zHok1-9qg)Jw<#Cn%VJ~&Fas(nX^>aliH$zNUay1lulnLe5Il_l?H&(9rG5NYFw72R zY)Qdzk6^leoCBCu1=MGvL9-5hA2-i5`s$=agymOPP}A|?Pxz_s<>CBc^4$!WbKZV5 z*@(16L#GkSD{r+aPny*Q#v%vRyMb6lbdI{sg06Gqh~%iBYBofN$wKe4h2o4c89F}y zPJPy?62+UVXwmg@sk}#Nb2`|5xHR&M2C!o(+B=%&wBjIK%1g4~o-kT8{2VH}x%^+E z3+a5SMZb|+CPcu@YnI5lHYU}s@=Pr9XF@mdx;MKnSM(^JW;i6dx?u9xN!%#+gC%#% zvFEAC*lE0s&VvGH+rA0!loIw*Uj(WCLrAK@(G=jBwnK36ZL>Y$3UZZ&tbr{&A!u=F zTt)Yh?%)=(nn72HtVN>oU<5>{tC;j?6doC-Rk|`)(b0TUtD%4ou|6`{x$d>@95FI$ zX0Mp80yg&awL^bB%prP!<37VoEm2$Km88^&*oMg}U3Dx@eXa*x!-kcQ(YGEH;$yPl z^QTJV9{eRPA{uuN(JL=JU}CrkX%3z{ouaqEm^Y6%pMfh9?+;5mT1K{F zz8nmLpv=&vQzbGV_z$E8;%b%h&!7w>Mr}YSs}|+%R7nT%Y)=;0)L|(_v^i&#<;BC0 z&1k6@qdSU1mWgwJ`^(Hp&4#or2kc8?LI`THn0{2Nk=D4pA#uR?Wd5E>7?}`I z*NL;Mg9(42l@bN5YOD>PfvV0r!@{hguI1XfrTEZpv61vGDHRPzA}t$L)b{PYj@qtG zH?J1&W8#cArB}VUG{1D~+Fvb9u{oj~2s0j1gXQ(FtN+xCitLi@RfZdh$RGk0VuBSo z%x9QS4(HT7JDG05t8Ae}T%AO?20c!m5Jqr2yJJlf9A+S{XWM6WYwoP+!?i*;W|J{7 z;>q>UFu&7j>^GFM^|63M>~%y#wMOYF>;*}8Dl7h6K&OJe%u)$Do%o?8r8^JN-BPh5 zBTu+s@@b8}4k?dovxODqZDq-Oa?CM+&AH2>SOxnI3Rs%3L%g==&xN5XFbYAKWFI@e zqL*4&z(vI+iPbH#&VHV?&_Y*&lPlQh&m{G<|=hH8Q>tDYVL zqZki7`cl^UN^9KnugwiZ%Q>-Q*mZXSwEopLWjORI{EDfWc%}=rEDZBtsrGj*j^}rR zJojgm#Kf9~Z}*NKSH5|9#q7ME+vK57?~@!H?#@U_gzM5|yTf#YdH@T2?QqHvR4{ODw*OK|=WU zhZ0T?j<#!yAXF^;yxsnX{XBP^ofG8n$NaaVO*Nhvba0k|wke9Q?5rfSadydVBMReIhD7!I zQEJl2imkEt&4dGsk2>b)94s{)@juq?S=vLI$pN%>!NMpw2DDxOPzG$0B6B5e{!?%v zddHZ&>-O&;r+@0EV>05yAR@D#fT<}fu`G<~EyheK7aufk^(~q?61XPCs=2cl>SXN} z&9}X{)wx zNps2zI&(iTvCE6ar6#$V1R`Jm1d=u!xsmvsYo(6;wp0e;6D@}u$_F4?3j`wjhK@0D zWWP`konu`O?JM`8BEwQcWn!LuuBC-kCj}S2CI=kG3p__a>@@ht{G`4Zp%sub6|uWD ze0B!pXVceU7;>1^+XsnVZE`KPwI6Bs=dpdXB(}VTH7|~6c0P5GJ^2#AYqcF(WdV`@ zZ+>@2$BiZ2AQbdbLhK2~C{3FvSUpPr2qI>abbS~+A@Cop=GRkcRa4K@+~k3-%hfgU z+_x(m7NCFLle*jK^u6vEM39iC2(0^2Y3$x?NT z1NxW}j;RBJoY@DR3ceUYIU{3R^$M8~os;)ld5ZWJ_{SxKCGS({ZYh-{C?} zcc{Vj2udDDH3t+L<08xa-tlk5B5A)pG^w*|WGZSnA=L_!`U%t$rjT6)H;acT@Qx^S zg1pZpty~u(*}1a9({QXYkf2hW*9(r|DT({1E__gahQDcVh{R`(pkNB|P>rn|Xh_O( zJF1Gs^_tm&=5%5na!pM@X}v+}p1xaX7|uwMn@4}$Fp{!cz1_q;MzLu}9|!aeZl1ru zg~5QE(A!;$m9VawKng^IG&YLnCMd`(26~%3fk)5M531?R!*L4~hVGNa5DkoQ2Jg6# z+~n8kw5}NOkt_=V2kYZ+tnC=VnknYU16c=HFkLowjKO7n9qyRrMY17yy$Fc89M?lJ zpC3zuN_DSlid8isc4J1jT!A(DN{2fFIUnT~+S{{L?{Oy)J2AZd 
zu4A=RqGj0N?$<^2R^#`}rg%YLm*Fo5Bv(2oPhbO#clqfB^QvY83V#OW{;e9XfUW+A z3(nudg|Y`Hs_l4p22IDVGsTNU!ruP&MEbMxAWrd%kkyB!XD$h|h2osgGJEzos$rX! z9o~fq3>fi-ohxYC=y;WUT5|T|8H}KD4Idj@FCK1xt6CHS418}Xal}z;VUI)z-1gw1 z+&M1Bx?YIrm|g>l;WU%yj=rcnLBBz#0VQoG|D|Go6CKus*51J_iJf=Li*HT?oRX0) zIZxbZsKox{^B9+R?hp>N988!5VHn}HUQ_bngJsDOjxFzJ@l!x3(4;KBnr{X!qW4#8A`q$22|@!E0S1? z%fQ0Wet@K1f7FsivG_pz*6d{^CyFoqK8gvB1XtS(60{Eb3GvP zH^s|)#x>vCj4mo@Q#hZB8-+zv*dXr~GtG%TwDbx&(!R+#s)v7tqXWAh_KN!}9m-sH z1H4}kpS1r+qrHV`nZ;CD7dcVHEu`&R!)ERq)jeq10GCgoz!HPTQ1o3wxpf}Uz=t|D z&lI$ArzBc3%_x3c693Fl`KnNu94Pa{N>+Wct4_sn1C?%y3l?dR!>){!b=K| zh{1SGwF&Yd^v7Go(>fl?#d;SLh_W=eS7)0UkyOm|T) z-2;CpfEb)^6LWy9jAGG{=161C7x>6fR|-221d14_q}i#9>x#1S$*TLAvK6I5#$|pdx^wD9>AZV2Tv^8kH^?U;+Md|Nf_Ah*c7%o zzhQaI%l|%nN82GSvh?)QS!M1T_yr7Sp==F0?P?cv6pIKB^In&hg3Ol1;9uDHOM|ry z-afdk|FVYskBn>)!C~!)f8H`v?0t_u8!>Mb-FeoH2T?_@)C6J6FHYJ?42|y51Gy_L zSS)<#++|K%iF4)Grq<5SxxPc8A_+gYq6G|S^;w?NH$#jX6>O1cTFz#GK-QiwK!)$% zNhffA+!fx-*n#ct&cUH=dpLwT!Yh$3Y;1r(&MRT&`*$qCPG{$NUhC17?!tN``v7pM zL|XfyQOIR-1$ZYOi;5p~S$a7dH4#+J%y7^_jYQM!hmC-+1yS{FvS9l#9Gw;5_G z%(V+hTZdxC6?kmb`2GFpOJ~_Xx6R;!I?${j`6^_N_sXu+FucR9yDe~8)OLBuQ~QIo zu7#p43?e_+yzS`VY^gn6kE=X$xD9OqL{?a0D0bO!HK+b7KpDXHzBUlhGCsY}?H z{d@ZZ7RzUPAh$md>Y`2ooN7f^NlQhx4XA)D1zC+KW@R$8PB+Thbj(N~iCv}&RXj^t z1*R%{nosw!9i@YVE2tmsRo5-qgW;0vxse$SpD1FGXi-nB`S+eqr2Gj9-mU7=Y{dc5 z%yT+vj0G_W%lL+~mi?jgmA~MT8DINdin?QcB?qAMn?;uw#f_|nKU(4FCwi_oGW{x2 zS@|Yd{={$>x`Tb8lw3wAWDZH#d(ksSSIu<`w(`_{Sq-3RBBy`Yp^}I%5eQ7&^!<$| zyeh~*!ud}lsD^!pel7IAqcXg{;))>$Z5(wYpQNSS9qcDrz0G>G;zk3Z)?q{u=-w$C z@F;nhu6IYaMVN^`#8eKI$I01U)K`4``rsSF*=HFDOKGc1c`}^WWUyJ;E zhGSf1UI8u$TzzM%ea9#uLj?7x`lIK4;nd957zG6Uu$V?c=f7+_8aEpy9_*sVZZKum$cKC zH={wq4apgiJv?tM5}5Q6ROn374?jP2(Dq$%DsxecLeW5s@NFz|FRw?Z9lMQeBbA~Z zCU(=4=-+=#5%Yt1qWfK~#Zy(LVa(VkeRxKyLAro5L#$N?ju<3ke6oq?0TZ4%>PwOE z)@~Uhq>PTyonHUya}Pb%8rWM&oL@5`mL0Aj1=w$?3^1zr*S#ote^V!Vsk;1-el~ZS z2(mg%B~alF6>0fR5vB~oyaqijhU%1nasq^{`gIeJ9g+~qSd%4I?@%$}tyT#nc-Q|( zsbdn;t%>s{kTVAj4V`@-YS8m14{bZkfyJHL;py9(bo2S3RG95E=LJ!KhRFj6qP&wsERV?p~gm&Zl9Ic|5u_! 
zdL9MNcOqh1_0ni-$5#^>^z8=gm8{;6pe`_;$iJnedbTI!eziwAoxyK7=9fD9v9R*F z;Aw-!{$Q$1+vthXX^)=L4EeQV1`spq6HuuQfDiPVT`yX@C=h_|KX9zw<)#k^fXJu# zcMSC?9y>%2LS0v%YgFrV-@Yyq41!vlnDa+OB0EX##7|!T;j&6UQ+rfUrAcX{PATRp z6i0lRGDA`J&j_L@RjWItLv@wm-J~$c0j-vHW_^tEs(K9h&9A2pJ?)cl;>zJf*oIPT zKWE!K^#k{ZKR3k+k{1H7qa}_!;`+`_?lb9eKA#271k29l3ZmHNoLYGcssHsMScwsE zK)Zb4f1S7-i@jfFiUp$)2(}cIK3Zm2bFIIK;!{rvgbhcvhc<-|sj z(iaaLQJzG&RBo&wu?i>In49Y;f0`kMd87XEEgonBnIj;~Jh9^G-C0PrSYxLH6>cAQJF^r* z>GwOHbx$09u|0CzEzi9HC{E&(=(JR0_XkHvsNyHS1pz>^Asqjp?oOZHx==V?6H4E(m9`Hu9@t zzfxP19KW=Dz~LX0W*f6p{6Ya7f zQr^lg%l@DYOuDnE2?szR`@h~&DZqsCCb_DnOU0X5X9@(-KlUrz#3|X>ZDnKot@ZwH zmOcMe!BJbTZt?UJ%l?eZta+#Z^M`~gjFCM5a*__~C$nU91_8Tt2ApMEu3L=8To4QV zF?o1s=fb95N7v#Iz9a#GjO56H)OxUw`-{hfd|*A_5BptPWRBVl!EFsHsP~Z>!B`}# zi4T*s@uAQEpe^e9Cq>m0NXS}=dZa^+3a6Ggy?2^RpR}~bt1x@=tJy4(0 zKG3RLj8Ki-5^-)XPO#_-zkrlHEs7xTa!AH?9E)97bk=q~XON}xA(516e>E0!7;E9& z<+MA3XLV~dYfpOmF=FQAhz9-$yj2ReF1Fa?0pCYX%+9OkFfjTvoJc<}0#A{*Hzy40 zsPb>EXW!^?21B|phOhh69FdJ2|F!m(Xuf0h3VbBi1a55mvcvLU9XHkd@9!c1aj<4a zEg!Q!zG8=RBhd6CK{2+WpHpxseU-| z0>|Um^F%hzRnx!9O1XIemF8aNI3^M|7UJ(MZ&E@a$)ZDVVieG3m>~%&;F31!{09g$ z=Y6~BsIM~&JXmu_!RpBff8i;VqL7;BPD+Fb?IS-Gp@mo$@2AYT&N$GB3u#GhkE(Ts zg>TGNW|$^bJqiYj?{n;V7~(QMWhh}8p4N!& zDRr?Qm4Z2@!u#T9>}XKGPm_t5`<4~|i`Y@3Z*widbJaEDYlu?r5{2^xDf|~}IU#20 zYI?1+GDJ|GQj$4vbEeBPT{Ec%IBL(RemTCnCOp*H6u9f)WbaWDE@N^eiGx%h-)@qP zl^88^%!i7-aJaB=f!a90er1=kRK)y4+0&P%49g}?MMU!Ji~}ys^JQmp-fG3ZP<9OD zFqJwdENr(m+`c_f9_Owu1uRnP^O|E~IW^gcQ}QkzbjPyml1q6(hliK@8x&8F;+aZ# zfmB}C8QM!T{7&wY@Dl2C?_THgjB8S*aJF`e5!Z7u`u**R<GSNbLvZMTl9XG?{Xxub zpO&ZE_uz;A7OtZur-yWk-((x!WruJDQrdpr9998%#}#O}&7jNm6oLQXBC|?^x zc3f^4N0E3Fr*Co!=0sFtRTpL0-4vnJ_m7lLYz*Vh;=8a()*U4Bs?`4f*|3Min~!fI z!KD{cQp$~|Imu0E$=P**BJXYrfZs)WH%s0|4M#Ke*|>Gb?CcmXC(Fzp$F?Yz#L6l% zRuvQ<8Z%H|JH{y;FsEbdNuq&%Fl)Lynq_(jgC=)3tjQ>kMR6$l~~v> z3XXpPD?}v;QwEkXY@o6XA%uCbNbq7#Zp$W8m3i+l@OiB>i3u z=s78*3un8~LuUkaw>=`DI%dKuANaB_-HAiKh@6c|e|y*wQ~=xyNg&O^cJu##nlgb* zBIeB$xK}ynrR_n5~>RJy^l7YAC?>HX@9!cuj%{@n5qA+AT!`iZ_eN#<=l zWRi~HXe9PyL_6G9mGXFteb2w>%jiz!kRRbu>xu#3B|0eNNg_M^6H5f@h&v|`k%SDl zY9RKy==+zML1vB4_X{+VyW#x)#!mCQgRBO_!pUvx;?6{_myCq=FZ*so=wmJrCrTc)4kI+PC_X`kH`^0^UmbdzJc8wX|zR11CxMc z>17wTc$%H$67?Lx(C8w2`YfY%cTm#^Dy3i@YBwdO3=yKY`MInFptpDydT!_LQEzlK z^JS;MDAeRn`Za0GJ}m`_lrF4_FUq1Lrfo{PM;6Ch+G(X55Dz?yS{K;;l@YEw!QQ-L z{$wbFb;czkKvsZD4y3!#ud=K!{~0=Hsv*11rZ9F1gSc8=WOjD#5rDbVLx5nCQ^yvp zjvcktkVq4WyZ5D-r4XfJqoGDMDM2uT;Y5mW640fW`mBK{6%iku{J{!`0vHu~GIy0r zA^#tM(wwdymnt#`Ts!V&?cb?-K2_)>ML30Cup5YY*iWxkku)HQfH_m+HP)`z<)Yhg zuJE2dvR~t}g+EyEBKu8PG&$&7mz30 zn0X$NQbV{`=ypK_C9>hMrg<1H8>)_t-%UrXjou4;Wbq|BzX#al`O;{a2A-}iMf$SV zLU47s$r{EIedv!kAqFuaWGAzK2Hf2DbUt`Mv1pKe-9?35<)tt^*xZ+F!~0&#uBN4Y zJu>dR07=;$2Ehzg1oCU#eHkMZF^JTuHocq*9>qiEgy}4|`ngw^&ir%9)=LpQp9MQ9 z$T0A%5`r!j9B~qTDv15L7>Z1A1p`AGrcU9J_hrY-|CZWpDH9h>?;FG#{|+T*4x{`M zTrGimeMjSB%fI3OQM3gMUHH49%>#)=*=xvfJlN18vtk<9#U0ym!VxM%|5G%$#OHU~ zAJv1o?!e9rm8+hD4(O$Nq7E%Bdd=8BO5fU_^sbs{k`>u0XV zJ%gUU%*H0Uf?3zdoS+VbYo2?Rt$YzpH!ynt=l3f-q&ko$`hYea&~49I?O0VQPiyMU z3aW89Gg7BE@+|NhbR>1~%C4%@%YO^cEJmS&p--!;k!-VVBj1jBRoSIN>hzVsc>w{mVo_C-Uoh zjP!WjIE8v6u9!tuuFy>x+UzSusMhxcqoo%DQfD&Pj0vc(=8Sao9a_?e*G9?+jcIRM zYAhLssc{CR2G%D3zYUyK2aR7sxGVqQ35?(;ei1haLu3ePN;}||@p|Wf({1Yd0fw6u z6^~X$g*wrEWWAZ$)<9w>l#&cPbvQ2~9B%=*n#1Or#$YY!|m`D>i zX$r^mI8TfzhEcR^c2>**om)xTbDH0m$_g={j*ePbbW0zjy5G^!0Yqqqf_JNR#bG`$ zpqST9916WwA0Frgs6C3bH3r{;3l8?-Wb9_{8+X{HE~+)gcL?H*Z~#(gNQc?XuqGQSnr>f}H;81DkAZ{Lea`wju8(%qVbnT27_@-48G z{kCC_-b5b@A^dP+c-vQbi0lV`;I~q)GxN^0j|j17#zjs>P+F1xHHhlQN1t}O4FLn^ zIw{HVwx_y@l|kcBx3{IqHp3V4mT)Ycaz*$A#Mp5fJO&wo0J<)+lUp_dS 
z=)(+zuj#x%sf-YBrHTBdN>V^xtW<+Go$!}Frb=ec+gU7VBAmjS2)dBGc`g6QFyiXy!h-OZd9>3hlPr$h>rCy5plX^rK3W zzd0DQL5^)RAuT*ruL#A(6SAbm0V;?=QtVo1Z4=JtrY_EKKCl&Cuupnbe44 z*MFU%^)G->S(^wFn(vx*DgME)bGC%~%A!_|$8dVN^4>rSLc;N7suJ77I!GeB2EsLR zlO`0;ZFhq}o>G$2*jbdC4nC4hSo_vymh!|-m^3v84 zOo7YxQqRldV&*?=tMY`?M(38{Kyv@y4;W%b2(rnB^EeW>fXGghZy)Vlp7KmfPrbD= zu^=8NOtJa1Mj4nFtn6^$_;cgCz#2a5@tan(T0k;gypI*6busKjQ`vh7?2V7(m(f_! zi{yMzm)Dfm3YCxcF91wCP~20IJ!L)y1}k90LjYLjs7=*GNR3sGbG_+TVogka+k`mS z1SnMx2(2W!ed;>1|NcB3ibp)#pPn_=D6BfaTJUR2%7P(V67i8@_Tx^dLHeQ{%pnWtvlPT2^mT0Xehs7kHJZtM7}DXhuCI zt|K>6;Pcr8RAA8638^zt8lA=SmC5{?xi*{1+e0+xgQ+hw+uMxi>vth;GcXT;6|+VR z!iP(?t}F~F??Y+c$NMO)URzRM)1=pJT!3~+a+Sls5(Y3SoEX@j^##3bU!+mq0%t2N znDZ@VrXY;}^9swZps6CaYptF0kXWvb+rdQa3AJ$`4hatguCv!;111+qlin|GOp-0d zpiSu1^Iut@evWc1Wv*haX8$)-OGtl(zd4>y6+IiCX&Rj{DAvW~1MhRzFZl+|puu%Q z$@PnSS7SapKMTWe!RZ$&JMcy6639Ocn-9&0hSNeNpNz+P;b(`3w)3vB@AWBUMvQc= ze7_`jbxPY?+zCt3&El?1dK%Op=i!s^ADofaqsRW2e6&9a@7lfnW_!kkRQ(&U&Y!PM z0=~1^`7oK?gcTLbw!rL0r&6Tf$lj0YZGw@FPrxDwp#a4~h^>6mM!zpW`F8A~j2#D;xr#%4tkJ zq!2mdHTl$+ztw=r>x#n5@6_c;>+WL@#$w+wD8G#h8yH5N;lz~Eau%S5C-iulh5oo< zk&m>9Y!q(uEgVkcO!OiMt9pUV()A5S=1*tYPdL6)5=+m;$f()zBEz;7{HNr8yfn9L zT_Yx6gu_VOtTtaf*OGL*%a^@199PkTy*vtB-snxCK|-vbRNhx73$e$DA`U>Q`%Ps6 z;?Sc`!q*b@L!!Vs4u(L{o$U~tR~Nd`-0kFs%9SeBq7s5IVa+KOQFx?qC`@SBSG6&xTe zxM$k8`wb($L=TU{w_Ot%GFToaSpX&o6Aw19l^zV?sS%v7r_AhKP;wF(-|YNoTCYm#ey8DkbfN;k=E_@YcH-WCEq%kYa1NiIk8Y-^nsolpq@J^M^stm6i?X zh6GEF47mjXQ%Stw2~oh{fBa^a5Ye519LVTlbF_s-BW~h(%*50U>ql5&SzoD;npEN( z43ZV9GsDAGa@z}Vq==%4rAmPosX+A}vfg{Z#)z`4azl;uRI75DpP5>jT zRjotvONO<-QcJkQK3~g`n-D(nCWCyZ9!LoJ_OI8X*#U+OVc-)uPkTb9Vl9>9Nh$m# zdgWk}XoMo28YPLzg*}Dd6LV6FZ)M(EuH?{&{q{_{AkYjn6oC*-uw^MxgQ2L*Pje$C zupG#0^-%$AUhv;hV%ZYQoIg5O+$1W6A_GfhfWKpM215C-->>%!1(w%=T8A`VwEeD= zWFEnnmA#c&k(@7$?hL(c4Cq@}Cf+p@Z?UWKT7U*y#m%ag#&144Jc-qOQCc>L zYhc-y7vqD}QD)j^@`_>*@$M%baNBePsjk7={6xN!S6jPNB##F_dCPo8eMw&p^Fh}g z`7+*%>UYY+H_E>I2=Ww#hZWVY8J|ANEf4jayR6?bELAlHkJWjFJa>|W)#Pv$g<*W~ z4{xFybCJoQpwHxg&rk$b3bS2=Yu?PH*5|4yjF6rdqB{t4BXQ&V(kVqi(&5u06Jl`Y z!xPkYg8I#)mhP_wBIlVa2)yg2rXZcs-Ep2<7Db%rRvV_+)_zB3T<2D~t*q>>b`Y_f zT>xJD3Ir%+Z=z3Kny!#>U&Bjf6$?Ung4ApzX7Ry<%LHOnqa2$Xd*oqni@+xIN5LQzXhje4d>8UnSQV6jL^%_6 z2C!gM_32PfEH7qR6;wy0a|amxkT71Bj)!+9e%rq6;l^`Woc+4nVQsd3pLr)N4*q96 z-%)byYsMyNX@p+%*S_j z^}#UdRhWLr;;gJ1kg^o#Jy9rE%(LaHrHwxYWkkIkFl(PJx%o$FG{>L4t)iGVwBGa! 
zLeoV#tJS9GLJdynO8Plz(~fls4nm4-Hemlkl00q?FFO4$#{gSEq`%`3?P~F&`2{(9 zbzR3rMXU4IWGr8!yNm#u&C7jR$8s)Jw$O%kL5B6Mw0b` zI%fO6%MIuxMx+%J&{ogB)Cp8>%a=R6|%V{G`7arQjuDVppDE-9}Mxk%?bW2}D zyF#6mKieTrJ%{d#x?hwy2$Z&8&M5?FjBYG@C<{sT*-LH;>+UKBShk_;O~(XgkvLV# zjMvcavQpk)q(9w~nVevm^wiXT9;}pQd$=3LaxmyhG>KD-l;Y_q!B5_`2C+qsa6i5~ zd*mvFDED7a&(o1~;!xLvQQTHjYzX%*?nmf7L=Z&}*Nar05Tpo-CIf6=gMhH)RBjYH zc~Vlof5!s|#8j)6aV+FO{5viZKF+7mZX&%_NtF5sT`$00SA=(Kf}whCRJuO+vp`{Q zE1JRNlGlT(9PAMRs)vU_T>)AQzyb>fLIHZI`%>3yYX}v(%~K7X;PNy2dPHT`V+2i zz3AZr6aiqVo;ha>bMxRHR`)@P-I6Hs;@9eNoId#7HgQeC{YordcJNK6Sl#s-Q2!`o zOa^q;t=55-Snur}3iXl~v!CID(r3I+vnvVxFRQg(_o1>g^_#r^v*-n=mODvJh?T^= zb&-U^nYCL6xlOi@UbPmer^^p`6|WThC10d_OCdlSFYbnXP7iy2m8?CIrUOtjYFlZP z>gqXFLvXsu`YWoQJ#qVR&1YkJ&SgsuuGJC?XkJ;wk|#VEhv5?JnFRp!wRN?w zuql|qT6d~rY{E-5sqUgVEvTO2cL{%VyjyOFth-=kfgIZ#lcrstaRS>l&RK*I;*L#R z8{`W*bAK5daD20Y7hFiGybu0qA%c#6QjP4092;M`ptt)L53N4E)-z8zp<5My*JH~- z4O)acH4_fP(5aEE)Z`$vqm7-< z_nyiCnV_dJ#?Vi?dy)uI(VW}`q(VB>2>y-K_-(1me_>TzSD{wDTpFJvLKs@5AY~=< zMAvUE#MjmA4nnFKA3qEzJ}xiVa&dLzV}5>2TGuI6{6bkk3RO1xkmu{jFU+zoeT(l~ zc)PJHA{}M}`p<$C(TKFplmLH8p8wk?$#&oFkR+kbGs%*@AI22_^Zd=Hv0Tfw#2fO@ z%3Z63?F1z%)BD@*1C9f-KTC?;4QynVG?SpoCMr7kj~4Wtw{l~EUHGD zFJ!(wO~V$yV9W_A)APRwC@L2i%&#QO7+l7U@t98qb3d8M<>Vs7X`}_V{HV}^m5Bwa zzwh8aL-(wL&SJ0(BC?|YB{ifbV-K;1WcgnikHAQpz;HRWompAnR}jvM{J3LAdFG0R zLn+DYUR>TokD%%YXMS|8*5&Rbsuv zX5Pc}r^v| zjfL(P|F}`xH&_;W9b-3?r}K7 zh&W+zE#p@Q8A)qM<~}Z6j8a)W%4DKksV?;8(n==Cy8z<)+6C&D#4MGWsugKaT@ zteP?1M?B4OP&rcMKtx+|l(HKqc_)vkj+Q=f_fsP&v~t-=@Y(`h zMhF7wYA1@G&2>JvY zVekU{4Y;$%&kl=h zps@5icSUk*ilt(A85MKe!-{AVifVKPu7e7!k7`vu6osNjp4xZMfTru^tl#tUe34U1 z!FO4r{XrG78&N?$ritsBIeR#*4?~yCV`PU5WoV<4Pj3Z#j+Vy99qjX};h#sS1sSv# zyW&Q4Nwc!&3I8{ri-&jtWns+Epx&Ld? z0x?{gVkwIXkRVRFF%P+(?~(;aWM|znN|1eSo>T%8oY$lHeq#Ij415wQMYrkO z)swP@_|9q;Se&hm6uIiXO$zB4&(pUY`#9<9_$`j$NK5R9x4t!$&?<>j1RXu6P6yz~ zVFU_M+LH0n&s=3chdhU3$`7t>{4eRppy4!A*I7z zh6$z1;k5|+v(@(z^M(+CcE;hTD!frj;}%H~`GC=cl6UH$K(476Nx68+1KK7N4HWQB zv<~ivJasQSZh0J-Gumn`Txs%U)>iPCt+0S`-0%|IBlJTPZ3k)2F(-&{sq4fvRX>5R z&nVXc_&O3f9)fNZ8x{>bOAR>Hvtztjhn~}Bw}(reKnp|hlEesufBiFQ|Ag{~UvLgol?f`eHXDyp z{8-kq%Q}53@mb}IEzTM6CqQid>?cqldgw_tVD6 zV4CDl3)M}+P0d-^B-fdX1j3h=%DV)@xKH;!-=qCHw!EX_GZsBL9@ivFzK>iNu&BX* zx^1#6k-LiQm1|w4p92FkCu3|j@VF?r<#@J+C{6&+MT7T6g;#>XFz-WuLU6VCHtb>J z>T!gxH=M~==Sejn8!;}bcm@fr#qq*oE30mPYms{^Gv17!t6taQ0uNBZVIm>2(^Qx$ znAd8Wxt!1(39-W00U988SSo?}u;b`A*uv;dX~s=%cnQ4efpU-F*dR)HfF&PZ!oEE2 zkBB}WI!uytfD~uFYX)eT$mcFD^^gnT8nM9JshpNBI=+CSRZQ}{AE9+wpS)C$m?zR4UI?IL@8fTakR{Ew!DsqaJB!9Fi^gu%#j9P`W$_)}-rGOQ zKK^tJK}s|x7XJO!3$Q23`0S~bcBJRbbE}Z4`v*M_F@Z^q*2VXufA>OKHbvMUdyz zYD&A@ru@hb!!h^-c!EwL8SK4581Ed_@RcZquHWx$6Gu}EKMg68P60=ugY~i(`(b)* zeI*a>R07}G#IsL_AP6L$;B^4uhdC)=-v#-Np*zYrH1p+!su5p3{Zsq2E=2z z_?|h|FmHC;hQ#C(=xzN_F+xEPm)WC)t)kHl^ylbc$U#<*zVvIgztoW~rvWRd*rE1Z zhrosv_Y>`bWRX)rUIlEkp8)0OO4|naX3W?i|GOfzhuXz>tq>Iy>bc({9Y6h(FQe6LS4H^eb67QUs=iazHT@K81YU4qv$ez4ABeEq@} zS4XgecHLY3+I|^H&@A4-s{Ydg$(F4ZR(BU_v+@c?&SzeLyH1K?Qrf32YL8@o4AKFP zd=7WlD3)Cl@+%qlYX8MG1KBtoTMjrlBVUgEk?xVgl_~u7D=b0|>O}-=xv(@seIl3+ zn$R(C@;o}EjTG9bE&(v_gWA?DZ-(MU?}5IK9`ccTWR=Cb7le8@40z4tQ2vQ2Ri{** zBwzRgMFN0Xn!Bt=uYn;-I+UQ7N9tX*>L~w7FQ_q6+h3mkt}eExZ}K|67pWF3pLYid z_gAd6@UZ39_Se8hyz(t&ZZ96APvu)xPE3?MZJ7RJQuU6Z>xbT9mlvB4A0>M;!p%D4 zNdaZf2EQUO(uI^jmsD(;2QRb%-@*75a--h7cgF^n<9whW$8lxKN8aNn30s7AHZ7x8 zy|X(l9U2q8Oa7OO(8Zh$PO+EXrqeh{kp*6yF~7+8?cXbj9f#7|Zozvbl9O^D`V4F# z%zU&bK@l{BBik|Rpt*$Dz?gtMy7R;fG__gddEV1y&HNElFHwrG8SDGTtGol$7?7&4 zMa(%k9E`|2-_U{yeejqgUcYC#;&f?dcGyJ>)1*XHlb9<6S${XG(6+6v1%|foV3TSW z*4>nCxD}(x2~YPC&-XCLSZWjVZ|-pEl4ELWY&B|L#ZLFdnKf67&KeTSrr*;Gh)|OE 
zKNP|$&yJJ>WJNOWY?Yx$3~pw1pAY!SgxyYEx$>#m7yw4CQZJJc93^-lEt>F@UI4+? z5pjz~&!nT=!NxX-E1IK#PO738ay|kKTxW>=zn#XTr$Mb-rHm>|)Flu3wjtJ$)oYuMvHFjm`0$gGRJ!lnvOQyNjc-*nuI$KY?iXh z&K4>oXv5!~L@Q5S|JH`3u;F6CySYow6_+uR#=J6160`L~lsBWpNAq6YR?%Yo()ZUQZSTr6;?69V;q=hm!Mva`~5;fQ0}=fJDfIT*l|8nc#?0k*q1 z94jYnFoj7c=kVXVaLe5QY?qsxWcS)szKNra8+*ViydyZPs_vXCd2|W4Xp)HGM0(bRnu1K~hVHsf}oN;Y2MW*7N0 z0_);3FPYFJ?>0-?g@@3__!P0Jp|OrD=gYu7)L_gGr$Pr#p}eBTDtcCBU4}RLo*g4P zPF7)_z*dcnb2?b4<=FN0t#EoQNG;uVLBsMW-`SYVhWDtLh5Ws>Eu@tVE74nD4LMAc zWnpt2^Tfm4vbD<(aE3oE?D>lx1Yq4ye#a>?|<&82iQ7{mYu}X0e;RBz! znt8&nOM${t9cp^5o%Yl5-SETgB{u9(f>C*XdMAH+*n;+Z$DLshiRgyn%in`!lF7oB z)}G&K*4#M?rrDsY>oksXoSwKgkZVU1P16K;f`1XizwH@u7DG7B3+aC@#eHLru(MTt zXnB570gsP{uml{l&spD{@um?=f5N;$tE6S;2!02?sM_uVjw;*dKJou9LGv~L&4Ct= zQNAx7c=x@*9qvKh<1N(Vqyja>BhbeTAFA}`I70+i#k<4%1|KaC_YWbR_Rw#Y=$L!j zW#f#-2Tc2?rUiKpZ*pDvgyN{pdFZMkP{mFIh#%-iP;R5Wmi3Be1X3hGA=Cgh6d&tC z2x>cL=cV>j7rbbTK-W=ytzLmGZKidv5V=>T+EF)t4n6zkAtiTs-A+8-4FLi*6Pfrw z_yA=XW!1R-vL)nv(m$+Hl094cq}F=rM&VQp&=)7O3uhrU2kM%^HnB0s9l6$(2cvtQ zf+BFTx2-#l&SZn{V(L5XhOx}$71NYF?9v3+{lU?d4ilye-@tC!qmE- zjcn=??qq=%xxM=b3I$`6Gwy6+JO{Kn59Tr_@7BH~TrQ^XmPKx({eCVRnYKP3;B4-3 zYbmR0Fr%bv6t+N;Wc#THvG$C=fS=g1ckx2Q+nz(wd8Z1Z_?Ox@{zVDhtLVL{6}=1i znlLZiYc6)YGL}&kc(BKngry3psBK6-Et}KjE9Ksr#Q3V%GxRK9{*nfE2=J)Ni%n*B zxVl^p{48i)iRYctIN+pxWM@IP4anq|tP`Lj(8F-))A_dc)YfnCP37F|OO)HxaJ*3A ztXMWRpF~58mWgh;$5ai5@U%v!Nv~h~(FL*w`uq?2L4Fg#E2d zGG_+Wv|>F3Np%rFK@3FffamKwssME{cXUmxHTvnYKp1r+Ss$XbBfqe@hQH;gq3Ic zbl~T1t3VI}DDG_%jdD#{%H*n#;5tDYojS017pY4;mo1KXg78GhUNjOqDs$?K1>B&VC1O*Og_+Gf$zj18PCa7#&KWcxF+5q$HM$ZNwzliBy zO=(nlvSMN_t4 zn{hD|k%aI|I`D}RDxRn&SehhvVI|;zJ?3(1jGWZ;3C`BzeV6-WI&IW$K;bL&B-}1G z%n>f9^|)v`3$(iL;lpV?2d%Kr0^KVm}J@DCf(saxhMA*$twllR1~_{F34-H_ZWQ zc$E}Hod0QXCSactNo3CV73wa|cECsfP-R>KH>CWFOQj)eH zv^Fq3(zTC{O!k4Yn~_lbVhG?6CS`hp-gl}z{q0gUJu@~5eA~EsP!4O(K7&SgF2&>m z^Bj@p&6^S4cr|sYHSH04Ti zwC;ROZtK0usXJ3bp?6siv39bD#DM0z7+~?Y#jq+U^WdFp_&(EDax61zH7I zY?(A#w7oX50UQP#@|7Pjq_p@UD5G*Oy#MU=*DVZo&yM1%RnP%euKudwoDrj4!3+Qr zCkhkMaK#@;rfrQ~xMOi_;6(V0zg;tPWT6s0{^}I#UD3N|+rIog<4VoOZY)69HC0^( ztRBz+mk9w*^!A=0-um{AJJZgl!<91E{#_Sc9z_V!ocGrv62Am+pVha4 zoj#tNcd1;etL&3EPASMQVf|;A!afn%p}QFRO>y43y58?(VA(?O0cDjIV+N9;LGeDb zBQoFg!E~UFg%z7Ed-lPQycXqz9P3ZpAZnqeph9X}?6)n6Y|KUfFzgXA&00km?(Rz+ zQP72kd0n<9eFPDi=M?E`V}|?rgqN4j*#adg63nZzSN*?aK^{PFGaxT67&`>7WAJkN zx^i{dhb{KP+P_9_xS}9f_faeIr;boB?cai|aK`QX;*v-WbFs6?t6yJyb+STOvef99 zA&Q2lGw9o3Q{eLk)$2oa zM@~m%5@~-ZIU_F6qYkZ<8VBmlgZb5?Z5RMTbJhkz=s2J!A4;Cv>IVE2gU*RvB|-oX z&S_IAImG=|tga6u>}HnE?gy+@jfs?l!5mM$Pnn$;(z$$^)%JKHBD-0t(>J zXsr(GCY~To0k{r6B7kR)&Bh*X631hhonHW5BEJIu4g)kPh4fIGVX9;R$jh1Sx0G0( z)8n`s^Kp`ezGO8$dZrs|3Rb=2Degms=s1SR*Dp54F2R0bN_wQqfk`AIoej7Cxl^k0 zAB$i@bj4d*%hF0Z@%I}+BRmi_eH=!BVXbAa)XlQocq*+6!uz&_xc}F^C?ydFBQyc} z!AE0`5J}dUJ1TsSj+AoeJb@m5ts6nm_x>>y9Mkp6X2C$8WFB0DBVx3FehH^opR3w4u>GEPt+g$I@%uN%)I#PmRLB974yNLz)QTp zp3BfE*~~#-Imm?Arm^(vY|^vTR#GtKOJj$}A-C?%y%HcH{cmM%Z#;i-F|=P`fB`Gw zRBIQqDtQ4K3w<@f7~^-O`Ve_=nxzIK9@v{1G;XxQm8jpV9C@So6l6Al}GEwLu;&QU38R$&%t5so~Qo3#VX`vjr^Rhw)3%G7gZ-giK z^DwO#*ow(6wx}jcz>!{AU`2pIo%fwSP>6nto$$#2-P~h%Qa3ljW@>zO>fX zIO+wqB3bHQ9u4BY60xj~yvHC|CxhP)HIwdj|3M#@!_fb0dZu!wzjKIV{g`*9>wrD~ z7ttd22eVfHAg+To)LCb+FWwJ5m)!h9Fpd14_h{8C)xG6JbD%P2s@SWOC{aI8;0wLs zhnzw%smDNT>`7G93*yJ)-gU2fKp^_=Vub}crf$#N1>A*EBGqc&F*iLy8SdMd&qyET z3n)SrcEsN9sMl^`4(5uq)xr-E7<1h`uiDV4!hs$POA0naC$?${^A73t!~w~+zl8BF z^2R_|$nXmnSNlT(Y32f^ystJK5?=L`uTS+iLP{=c2|~QDTRld3;V#HxKJfsJZI(gN z2Sz!(s}$WE4AP9@#l*rDsS!_$-0X$4)ppH>=tf>Ymdf)#Q^jX)S{ufl;p6%|&oM8Y z*D#j61^zZ*B4x_|$0tfN1pF(#6t}h^g}+1lmfA{RGTw*P%|2DeJ`j*BRFySGbj{u{ 
zU)kSGXD)Ei^MyV{mxC&}MK)1UUvncMsP<2JkCTjIBzt*&E4b&if8PO) z#@A6`?{64+HEn{kxZAgnkk&vu-%V3)oCY$Zojc{5_${TX@Q*ypqX!FY;1rdW7eyDN z7236WRZ*XwN#AGrcIOW`u3(W8^bC@o&xH{yD@a<_$^O7x_J9T(7=DC05I)!u4!rE5 zD(<#aqJL^Th6BM!>V-EpH=BqfkIXt@>`tFUJ^lr5znwe7kp=Y2CQ=HJ#x=<h|2T!Sq5H(HWlS${Ah(47iU zjm;p#ccBqlk;wo>+hOB)7jw=cwYr>DMh8dCJ@E5K26pw5=MOcQpEm960JDk6{@uOo z)juRO&)v*m`+gYg#VL? z2ybu3dx4)kr^1HpD%~{LJmsLNkH9pCw*VX4EZ9_l>Cms4ne+LGv*g^OVDHJZ4bZR; zgT)JW#)sy{JJXGNe1Y&giJp}jp5Z`^~t7F+@?iCJcH4jESOOzDT*(xvX z^#iGFz0v0eJu=RFHaqYS_aY}p2UBDM^FQ46haZd$p1=EM7cGTPFub6{v%S_dPz(Q2 zR^)WzW5AJqdmciQ2Cm)XH!rEQ>3bCdlo99vx->C#i~q^qzxtAJ#waLlkE7DzLYQMo z)lV9!fv4%v3LrP7)u$wMN(?mT-t7Dg{LBEI1yS@T|@f7iQS zO(wtcsVRH&aC-U!Q=A`X7>Mrbk5h)wNhR!ORYX3=xt0)%y7TdI0nVay$gvWcFY}H7 zIaM(E{9YU|5WuK)lgyo3xtxh$5Ot00r_b$|H;F52>=3OK>2twn+p`+V3Tg<6gT|r? z`Ff(Qj2{McA#tSW<;b@DFfGn4iB(N@C$=~uAjmEu>%a`(?oaSmlyR|!UV1{}J4B5V zVw0$D2B*~s05HI*47Tz}>ZqmzL{XS50M6P8yNN1ltAyWTmdsc;LQC|4A$E-N%7>CD zhB$*p?Suyt zKC$&=2;X|*gDg4QUWh*(auM_skJe1;;8BByZ7%_acouN0y-*~oL-ldUNFQc21GIkZ zzzS^6%I9D$N|b?X#gE=Pe^wf&wpB3=M-+Bhz1jubkNW8Wv=NC+@H>g*+h3f@(%*c4 zA>=0}V85y!k{#dH#`|EQcDHr%n{iQ$>y1l9pWoJ#I}Hw^4eNm=B~)RyjsXzbXMuQ@ zeGqdCDj1e`JN5E%4TRRO&NZ*3x6zp`eebxSoOP?iX4%B!X4QLT>{?P%Jq8tZ&5@PR zdl0<5K*1IeWzA1uF_-nNH06;}fUmm|ppBFGb@yN4FC; zE}N}b@R+JubxE@lGw&XWsFF`t=>8Sb-*iXA+MeAJPV zZp*Dk#0YE%^C%b9ndKie92GL>ani&GLB8_jx#j*s6{m_r;dqY+=wQ0Ny%xDei@a)Q zf6(s1SeRW#sdDh%*f@-*|C+CVI}r3=J$zZm#gGcT!M=kN8Jc%N4MvhWt@OsFS6AYa z2{luG=om{Kvt=+r@ot$Nx2%J;q7g2dqQqy73Mw2+5#Ifhm$afQEsJPxVXQ9m*MrXGcsuA-E;rH#&jge!~mNky+>-b6Y2y7B;L19Q%S|v+s4qk=*@NsXP4(-5RW*} zY;I3fCOX<+zJtz`BvS_KB|Evo*b{zN-@mx%xow6BG_99-bLM#KhVHS_*(tZ}leJH; zt6Wf-_|_h@it$I?|YQI})CPUsdK=UYXLP{jo z8>7`o)Sqb7Q|lD@cfhX^h>{4!>D*O_Q3jL1urwaGE5_nZy zX!0oH7cl!-emtP%+pIvmM(cmDW%uVJr|iu?>=m?aJiLNDO`^1QpUXYYBZ{}`91vCq zu@do10_nL}cZr<gXI1c}7w=QB+;{#06k1 z31ff7lArMr4P%R0UKmC zscrFF5&r=ITUA(vDX7zGc{FGC@NJwFfgoz2ESkW1E)||gIfm@jJbmWlKwyi)8B&_q zGAjt#^|ojaKDC=`;lC|eS<(Ib2B~3u1bzB=a zV2Uobeaow8v+{(^oC(_}tWw5Hvk;B-2L$mlfLnaNjRLC*W6*_L4L z^9J5u_ty3PLu73|GnYl*~!r#7rbiO?op%S%TW&lD+Np}w~ix2nHr$SSU zW2cl~n{fu_<6gMe0zcm=&H<^oIl1Gxg*(C3Mfa#da@Wdq(JZE84e04_TJO>R&!aC< z_IX_D8Z5|eCRHHJn=t5az9D<~B>KqGNvw7=FBMPxo_ff58^~<0!nb`H_+Au@VUwK| zD57Qp;_KuhOHBpHt79|`g8l8z#5i}p-x(4L4B&_bCd7ZpA{r97^LMu$dPiBb^0J}% z{9m&|g1q~ok1067Zkj}B`V>vNJk`c?t%uQBXgu!m()SOhWckzTLJwADzlM@QGtRwc~onhg3_}IE5KVQZ%3g*NpOrRhX-gb z6pi$Y;tTnq%2Xr#k+}5!Fq@&F&c^$7(szxE18Svc({>M~>JHsn-K*MPeIz7zIo8)Q z0lx@}H0l0z`caA{c1eKCiZ4ieYyRsn#SHNt)3=V!#=Czc`TRV^nIudP1=iAJ_|+MF z-x#e*WrzejF=Eo&e_>>eN9xNgY@L2G7oXpUPmEi2APl#5u}=?*f4CYsATc3ea9x7| z@bQ4B=mYeZA9lAOu2hi;_*0rL)fA!7lq=yZDdi4g(w}?YE5A&65N#;q58MwN8ubM6 z(sQTO-jGBk^Hiz!XS;2Oe-GaE9hcboL2qVT(NO`F<0NLe4ec4l)Z+ElCXo zi1**EO%*@blIi?;Q>%bnlUe@e=rFIweVKSEr2I9HS(4Kh4J3zM49DvBShTa1Tj@a( zHM)tA4{-{<$9Z6N-^Ahs6EE5jC|6}REMrj~HRuKU2Ri2IHn$Ck41xwAe|3H+yXRSW zI7;IR+xXUNnkJ{7?j<#o@@9N|6FV?@;J|J(6Z~MWf82rfW1IkFK{W;ZcJ?8Gx-b!E zTvc&;@buWxx|EJSE!wP$bZU`o7cQK4YD@d`eQG}wb4L> z4LOqqVIFWae}(f@zhi)Q{|KB8o+gG$B<-;hRD%5~O>B_In-3}!n2a;+qrMuFQi<@Y zCr8pcrzjCiuls=o+bL!z?HhI^#DBu(Uex!+q2gvXX|VPhyasT%H;MRzebp(mL}~cu z)~uLDM()O&*UtFwiJwHrer{#m`@LOioNo+mG^^^!O?QI$D}a>GZoX@A zRR5kH`GC30X`$@gM~bGQZl<@9(PXzxmr*rXedu)Quj$~!?Y>>j>h{7H%e7Smjq>Je zZw3Gds^gL5;QLf*?kO{$tkrYVZ_@Pp@UdN3O-H_n;$qo9?-5whp)Z>xK?~-Y&UdF~ zK#)NZzdseBqgjQ~Z?{l_@JiHYsn zq|(KOD2Wk|hGG_D&;;l0{pw_CG)bzwEZ_6iZ`>C;M(V|3ktv=R!{gPm6=B7qzebn7 zB){%GzVJh8LuodtJ>-kgNW&;QWPoGywGOFx090BjOeSsVI)*B%l*e$Q^}*faYI~lI z0Py&qy4C|GP_813eje8gqFlf#*i?n0xkDo?;J||n=om`MZONXaTABO$R`*s!qzHP- zd21DKAuXSjz_bT-u#waAR-YLI`TH>5+5&WO&M%d= 
z6&L_ozP_Q5I`)J)fZx#4@dQ8(Z|pg8ZRa4R?S@>yQ1e{Xz)yti5Uq+ij02`#r%QGd zxi;RQDd{~eTqbxG=z>fwwtXjDz9_(f^xh~$Dns!;uxrICZF}gMpeX9Y8;L-He^D@Q z)un*SrDl+7ZO~mSX=q zelrV9e_YomJKuSwqNk142Pl+_2GQ;ye234dJ&-?K$;K<%AS+$eF)Ha$3{AzsoqN;^ zLTUh?hqAE{Na}zv0~+X%N&OfKKReKt`3s#rwT7wX5G>|mN&Y#}>+{be*?l8vKl^H= zmtYjnOCTdq%K%wK+5j_8nAjX{vk5dRgOCs-NMLLz_9TGhko zB^@?Cl;x>QBKn8wRgWb=mXjBa50`)d0I6vB=e|oIoXX)>iO6Ncj^X8>_1RLu0%T#s zUwf>&k+?GShAbXD4b%i**gz~6Lc4tK7D4rd$d?aF5ikaG_vZ{`m34nKgE3SQaAQ42XHOdiXOl-!l~&Z~z7U=-&sz2YQ21 z1!E3zi5fRX1Nsn^qL%OWg-O3WIE7jz2Qe=$g8^VNU502I^Yb1fp+hhy?015ZNW5T3nIMJBvvjK|-bof6#cmp?S&>Tt%pA58L3V& z;cjVPxE7-U!qS?k;0iGO1|!X9Mhl|QF#Xj)JB$@Y1_QcWArYRO7}Qsa6*sEmkHh(z z-_th{_ncTBx-xwY0OL~488r?03YZfN=mQY}z-EX{XSJ~4Az5^r^*8Q*ytq;f&ARuDv~Os|B&H`o zVp3qgtPDS32l;nbRmnMb*QPa$;{}olXgh*_e8Qv&>=7Ji{@(g*_flcP>^;s`u}&);^mzP9s9wC-!lO_tZsk+<}^YPvj`wYf2&6-y&`^9v;_Zx1g_UJWyjwY`+rC%@m~x)qnU?>k2U!ret?eKQCNuPkj%zyNU5{T2%IC^y@?vM_&*M8|({ zdO49ub%4YLJUb3_Wb#&$qeDqKR z`5E;P(so>M6(NElK}>7J_$(vXWu|AN~?%H>ki@epdIBw5nj^ZjayqL!VQL1LU5_E%9*}CC1kVhkaVI z?*-YJXH#qzXn;-pl9Ob-VgZudYpGY-QkJ-fHEBq;0A`Q?w&8=0Kmgv$VURBL6${j2 znfrp8esfT@gl+MeqhJ$B2sdVkvY!M103u=tiC@}qr~n5T5oWap8(*Rr9-wEz#xNAV z$!tH6L+s0Bl8~a-H5dtEje0>LFTFsjisla(D^DqG`=#KR1854M6&qR%n>oi%7w?pk zQxwKcW1@qyJMmlLAIwWjI9B{?KJA<0{fSkgbpHoCI_?M!j!vS$AS}U$eYx&Nm#%N+do~p&TB?Q<9u{Mty44E2FFu$(- zns{3mBxs--64ZBlsEMxXZ!ilslwy^6FpgAEWUh}iV$5W1mZICgo zqznKWC>lR+<6U8>JWlgHKioaUPrK6PzUSDpG;iq zPY1xe literal 0 HcmV?d00001 From be8f9a4d0d053a32b12d1ba84f48f7e343facc81 Mon Sep 17 00:00:00 2001 From: Pepijn <138571049+pkooij@users.noreply.github.com> Date: Tue, 29 Apr 2025 11:17:02 +0200 Subject: [PATCH 07/88] Add description motor order SO-101 leader (#1051) --- examples/12_use_so101.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/examples/12_use_so101.md b/examples/12_use_so101.md index 161712c3b9..5b8797b7c0 100644 --- a/examples/12_use_so101.md +++ b/examples/12_use_so101.md @@ -225,6 +225,18 @@ Here is a video of the process: ## Step-by-Step Assembly Instructions +The follower arm uses 6x STS3215 motors with 1/345 gearing. The leader however uses three differently geared motors to make sure it can both sustain its own weight and it can be moved without requiring much force. Which motor is needed for which joint is shown in table below. + +| Leader-Arm Axis | Motor | Gear Ratio | +|-----------------|:-------:|:----------:| +| Base / Shoulder Yaw | 1 | 1 / 191 | +| Shoulder Pitch | 2 | 1 / 345 | +| Elbow | 3 | 1 / 191 | +| Wrist Roll | 4 | 1 / 147 | +| Wrist Pitch | 5 | 1 / 147 | +| Gripper | 6 | 1 / 147 | + + ### Clean Parts Remove all support material from the 3D-printed parts. 
From 1a45b264e71f14d956dcc0949ad4e8cb36db1e11 Mon Sep 17 00:00:00 2001 From: Caroline Pascal Date: Tue, 29 Apr 2025 17:39:35 +0200 Subject: [PATCH 08/88] feat(encoding): switching to PyAV for ffmpeg related tasks (#983) --- benchmarks/video/run_video_benchmark.py | 4 +- lerobot/common/datasets/video_utils.py | 221 +++++++++++++----------- pyproject.toml | 2 +- 3 files changed, 124 insertions(+), 103 deletions(-) diff --git a/benchmarks/video/run_video_benchmark.py b/benchmarks/video/run_video_benchmark.py index c62578c46e..9d587ee9fd 100644 --- a/benchmarks/video/run_video_benchmark.py +++ b/benchmarks/video/run_video_benchmark.py @@ -416,7 +416,7 @@ def main( "--vcodec", type=str, nargs="*", - default=["libx264", "libx265", "libsvtav1"], + default=["libx264", "hevc", "libsvtav1"], help="Video codecs to be tested", ) parser.add_argument( @@ -446,7 +446,7 @@ def main( # nargs="*", # default=[0, 1], # help="Use the fastdecode tuning option. 0 disables it. " - # "For libx264 and libx265, only 1 is possible. " + # "For libx264 and libx265/hevc, only 1 is possible. " # "For libsvtav1, 1, 2 or 3 are possible values with a higher number meaning a faster decoding optimization", # ) parser.add_argument( diff --git a/lerobot/common/datasets/video_utils.py b/lerobot/common/datasets/video_utils.py index c38d570ddf..375314e985 100644 --- a/lerobot/common/datasets/video_utils.py +++ b/lerobot/common/datasets/video_utils.py @@ -13,16 +13,15 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import glob import importlib -import json import logging -import subprocess import warnings -from collections import OrderedDict from dataclasses import dataclass, field from pathlib import Path from typing import Any, ClassVar +import av import pyarrow as pa import torch import torchvision @@ -252,51 +251,83 @@ def encode_video_frames( g: int | None = 2, crf: int | None = 30, fast_decode: int = 0, - log_level: str | None = "error", + log_level: int | None = av.logging.ERROR, overwrite: bool = False, ) -> None: """More info on ffmpeg arguments tuning on `benchmark/video/README.md`""" + # Check encoder availability + if vcodec not in ["h264", "hevc", "libsvtav1"]: + raise ValueError(f"Unsupported video codec: {vcodec}. 
Supported codecs are: h264, hevc, libsvtav1.") + video_path = Path(video_path) imgs_dir = Path(imgs_dir) - video_path.parent.mkdir(parents=True, exist_ok=True) - - ffmpeg_args = OrderedDict( - [ - ("-f", "image2"), - ("-r", str(fps)), - ("-i", str(imgs_dir / "frame_%06d.png")), - ("-vcodec", vcodec), - ("-pix_fmt", pix_fmt), - ] + + video_path.parent.mkdir(parents=True, exist_ok=overwrite) + + # Encoders/pixel formats incompatibility check + if (vcodec == "libsvtav1" or vcodec == "hevc") and pix_fmt == "yuv444p": + logging.warning( + f"Incompatible pixel format 'yuv444p' for codec {vcodec}, auto-selecting format 'yuv420p'" + ) + pix_fmt = "yuv420p" + + # Get input frames + template = "frame_" + ("[0-9]" * 6) + ".png" + input_list = sorted( + glob.glob(str(imgs_dir / template)), key=lambda x: int(x.split("_")[-1].split(".")[0]) ) + # Define video output frame size (assuming all input frames are the same size) + if len(input_list) == 0: + raise FileNotFoundError(f"No images found in {imgs_dir}.") + dummy_image = Image.open(input_list[0]) + width, height = dummy_image.size + + # Define video codec options + video_options = {} + if g is not None: - ffmpeg_args["-g"] = str(g) + video_options["g"] = str(g) if crf is not None: - ffmpeg_args["-crf"] = str(crf) + video_options["crf"] = str(crf) if fast_decode: - key = "-svtav1-params" if vcodec == "libsvtav1" else "-tune" + key = "svtav1-params" if vcodec == "libsvtav1" else "tune" value = f"fast-decode={fast_decode}" if vcodec == "libsvtav1" else "fastdecode" - ffmpeg_args[key] = value + video_options[key] = value + # Set logging level if log_level is not None: - ffmpeg_args["-loglevel"] = str(log_level) - - ffmpeg_args = [item for pair in ffmpeg_args.items() for item in pair] - if overwrite: - ffmpeg_args.append("-y") - - ffmpeg_cmd = ["ffmpeg"] + ffmpeg_args + [str(video_path)] - # redirect stdin to subprocess.DEVNULL to prevent reading random keyboard inputs from terminal - subprocess.run(ffmpeg_cmd, check=True, stdin=subprocess.DEVNULL) + # "While less efficient, it is generally preferable to modify logging with Python’s logging" + logging.getLogger("libav").setLevel(log_level) + + # Create and open output file (overwrite by default) + with av.open(str(video_path), "w") as output: + output_stream = output.add_stream(vcodec, fps, options=video_options) + output_stream.pix_fmt = pix_fmt + output_stream.width = width + output_stream.height = height + + # Loop through input frames and encode them + for input_data in input_list: + input_image = Image.open(input_data).convert("RGB") + input_frame = av.VideoFrame.from_image(input_image) + packet = output_stream.encode(input_frame) + if packet: + output.mux(packet) + + # Flush the encoder + packet = output_stream.encode() + if packet: + output.mux(packet) + + # Reset logging level + if log_level is not None: + av.logging.restore_default_callback() if not video_path.exists(): - raise OSError( - f"Video encoding did not work. File not found: {video_path}. " - f"Try running the command manually to debug: `{''.join(ffmpeg_cmd)}`" - ) + raise OSError(f"Video encoding did not work. 
File not found: {video_path}.") @dataclass @@ -332,78 +363,68 @@ def __call__(self): def get_audio_info(video_path: Path | str) -> dict: - ffprobe_audio_cmd = [ - "ffprobe", - "-v", - "error", - "-select_streams", - "a:0", - "-show_entries", - "stream=channels,codec_name,bit_rate,sample_rate,bit_depth,channel_layout,duration", - "-of", - "json", - str(video_path), - ] - result = subprocess.run(ffprobe_audio_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) - if result.returncode != 0: - raise RuntimeError(f"Error running ffprobe: {result.stderr}") - - info = json.loads(result.stdout) - audio_stream_info = info["streams"][0] if info.get("streams") else None - if audio_stream_info is None: - return {"has_audio": False} - - # Return the information, defaulting to None if no audio stream is present - return { - "has_audio": True, - "audio.channels": audio_stream_info.get("channels", None), - "audio.codec": audio_stream_info.get("codec_name", None), - "audio.bit_rate": int(audio_stream_info["bit_rate"]) if audio_stream_info.get("bit_rate") else None, - "audio.sample_rate": int(audio_stream_info["sample_rate"]) - if audio_stream_info.get("sample_rate") - else None, - "audio.bit_depth": audio_stream_info.get("bit_depth", None), - "audio.channel_layout": audio_stream_info.get("channel_layout", None), - } + # Set logging level + logging.getLogger("libav").setLevel(av.logging.ERROR) + + # Getting audio stream information + audio_info = {} + with av.open(str(video_path), "r") as audio_file: + try: + audio_stream = audio_file.streams.audio[0] + except IndexError: + # Reset logging level + av.logging.restore_default_callback() + return {"has_audio": False} + + audio_info["audio.channels"] = audio_stream.channels + audio_info["audio.codec"] = audio_stream.codec.canonical_name + # In an ideal loseless case : bit depth x sample rate x channels = bit rate. + # In an actual compressed case, the bit rate is set according to the compression level : the lower the bit rate, the more compression is applied. + audio_info["audio.bit_rate"] = audio_stream.bit_rate + audio_info["audio.sample_rate"] = audio_stream.sample_rate # Number of samples per second + # In an ideal loseless case : fixed number of bits per sample. + # In an actual compressed case : variable number of bits per sample (often reduced to match a given depth rate). 
+ audio_info["audio.bit_depth"] = audio_stream.format.bits + audio_info["audio.channel_layout"] = audio_stream.layout.name + audio_info["has_audio"] = True + + # Reset logging level + av.logging.restore_default_callback() + + return audio_info def get_video_info(video_path: Path | str) -> dict: - ffprobe_video_cmd = [ - "ffprobe", - "-v", - "error", - "-select_streams", - "v:0", - "-show_entries", - "stream=r_frame_rate,width,height,codec_name,nb_frames,duration,pix_fmt", - "-of", - "json", - str(video_path), - ] - result = subprocess.run(ffprobe_video_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) - if result.returncode != 0: - raise RuntimeError(f"Error running ffprobe: {result.stderr}") - - info = json.loads(result.stdout) - video_stream_info = info["streams"][0] - - # Calculate fps from r_frame_rate - r_frame_rate = video_stream_info["r_frame_rate"] - num, denom = map(int, r_frame_rate.split("/")) - fps = num / denom - - pixel_channels = get_video_pixel_channels(video_stream_info["pix_fmt"]) - - video_info = { - "video.fps": fps, - "video.height": video_stream_info["height"], - "video.width": video_stream_info["width"], - "video.channels": pixel_channels, - "video.codec": video_stream_info["codec_name"], - "video.pix_fmt": video_stream_info["pix_fmt"], - "video.is_depth_map": False, - **get_audio_info(video_path), - } + # Set logging level + logging.getLogger("libav").setLevel(av.logging.ERROR) + + # Getting video stream information + video_info = {} + with av.open(str(video_path), "r") as video_file: + try: + video_stream = video_file.streams.video[0] + except IndexError: + # Reset logging level + av.logging.restore_default_callback() + return {} + + video_info["video.height"] = video_stream.height + video_info["video.width"] = video_stream.width + video_info["video.codec"] = video_stream.codec.canonical_name + video_info["video.pix_fmt"] = video_stream.pix_fmt + video_info["video.is_depth_map"] = False + + # Calculate fps from r_frame_rate + video_info["video.fps"] = int(video_stream.base_rate) + + pixel_channels = get_video_pixel_channels(video_stream.pix_fmt) + video_info["video.channels"] = pixel_channels + + # Reset logging level + av.logging.restore_default_callback() + + # Adding audio stream information + video_info.update(**get_audio_info(video_path)) return video_info diff --git a/pyproject.toml b/pyproject.toml index db3d8e21cf..72047a4fbf 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -62,7 +62,7 @@ dependencies = [ "omegaconf>=2.3.0", "opencv-python-headless>=4.9.0", "packaging>=24.2", - "av>=12.0.5", + "av>=14.2.0", "pymunk>=6.6.0", "pynput>=1.7.7", "pyzmq>=26.2.1", From d3f59915df64ad15d0251f734b4717c34e1270f0 Mon Sep 17 00:00:00 2001 From: Pepijn <138571049+pkooij@users.noreply.github.com> Date: Fri, 2 May 2025 12:47:23 +0200 Subject: [PATCH 09/88] feat(docs): Add new docs build process (#1046) Co-authored-by: Mishig Davaadorj Co-authored-by: Steven Palma --- .github/workflows/build_documentation.yml | 23 ++ .github/workflows/build_pr_documentation.yml | 19 + .github/workflows/upload_pr_documentation.yml | 16 + docs/README.md | 137 +++++++ docs/source/_toctree.yml | 12 + docs/source/assemble_so101.mdx | 336 ++++++++++++++++ .../getting_started_real_world_robot.mdx | 370 ++++++++++++++++++ docs/source/index.mdx | 19 + docs/source/installation.mdx | 84 ++++ pyproject.toml | 1 + 10 files changed, 1017 insertions(+) create mode 100644 .github/workflows/build_documentation.yml create mode 100644 .github/workflows/build_pr_documentation.yml create mode 
100644 .github/workflows/upload_pr_documentation.yml create mode 100644 docs/README.md create mode 100644 docs/source/_toctree.yml create mode 100644 docs/source/assemble_so101.mdx create mode 100644 docs/source/getting_started_real_world_robot.mdx create mode 100644 docs/source/index.mdx create mode 100644 docs/source/installation.mdx diff --git a/.github/workflows/build_documentation.yml b/.github/workflows/build_documentation.yml new file mode 100644 index 0000000000..884e2e4b55 --- /dev/null +++ b/.github/workflows/build_documentation.yml @@ -0,0 +1,23 @@ +name: Build documentation + +on: + workflow_dispatch: + push: + paths: + - "docs/**" + branches: + - main + - doc-builder* + - v*-release + + +jobs: + build: # zizmor: ignore[excessive-permissions] We follow the same pattern as in Transformers + uses: huggingface/doc-builder/.github/workflows/build_main_documentation.yml@main + with: + commit_sha: ${{ github.sha }} + package: lerobot + additional_args: --not_python_module + secrets: + token: ${{ secrets.HUGGINGFACE_PUSH }} + hf_token: ${{ secrets.HF_DOC_BUILD_PUSH }} diff --git a/.github/workflows/build_pr_documentation.yml b/.github/workflows/build_pr_documentation.yml new file mode 100644 index 0000000000..51bab10d5c --- /dev/null +++ b/.github/workflows/build_pr_documentation.yml @@ -0,0 +1,19 @@ +name: Build PR Documentation + +on: + pull_request: + paths: + - "docs/**" + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +jobs: + build: # zizmor: ignore[excessive-permissions] We follow the same pattern as in Transformers + uses: huggingface/doc-builder/.github/workflows/build_pr_documentation.yml@main + with: + commit_sha: ${{ github.event.pull_request.head.sha }} + pr_number: ${{ github.event.number }} + package: lerobot + additional_args: --not_python_module diff --git a/.github/workflows/upload_pr_documentation.yml b/.github/workflows/upload_pr_documentation.yml new file mode 100644 index 0000000000..32665930bb --- /dev/null +++ b/.github/workflows/upload_pr_documentation.yml @@ -0,0 +1,16 @@ +name: Upload PR Documentation + +on: # zizmor: ignore[dangerous-triggers] We follow the same pattern as in Transformers + workflow_run: + workflows: [ "Build PR Documentation" ] + types: + - completed + +jobs: + build: # zizmor: ignore[excessive-permissions] We follow the same pattern as in Transformers + uses: huggingface/doc-builder/.github/workflows/upload_pr_documentation.yml@main + with: + package_name: lerobot + secrets: + hf_token: ${{ secrets.HF_DOC_BUILD_PUSH }} + comment_bot_token: ${{ secrets.COMMENT_BOT_TOKEN }} diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 0000000000..275fee46bb --- /dev/null +++ b/docs/README.md @@ -0,0 +1,137 @@ + + +# Generating the documentation + +To generate the documentation, you first have to build it. Several packages are necessary to build the doc, +you can install them with the following command, at the root of the code repository: + +```bash +pip install -e ".[docs]" +``` + +You will also need `nodejs`. Please refer to their [installation page](https://nodejs.org/en/download) + +--- +**NOTE** + +You only need to generate the documentation to inspect it locally (if you're planning changes and want to +check how they look before committing for instance). You don't have to `git commit` the built documentation. 
+ +--- + +## Building the documentation + +Once you have setup the `doc-builder` and additional packages, you can generate the documentation by +typing the following command: + +```bash +doc-builder build lerobot docs/source/ --build_dir ~/tmp/test-build +``` + +You can adapt the `--build_dir` to set any temporary folder that you prefer. This command will create it and generate +the MDX files that will be rendered as the documentation on the main website. You can inspect them in your favorite +Markdown editor. + +## Previewing the documentation + +To preview the docs, first install the `watchdog` module with: + +```bash +pip install watchdog +``` + +Then run the following command: + +```bash +doc-builder preview lerobot docs/source/ +``` + +The docs will be viewable at [http://localhost:3000](http://localhost:3000). You can also preview the docs once you have opened a PR. You will see a bot add a comment to a link where the documentation with your changes lives. + +--- +**NOTE** + +The `preview` command only works with existing doc files. When you add a completely new file, you need to update `_toctree.yml` & restart `preview` command (`ctrl-c` to stop it & call `doc-builder preview ...` again). + +--- + +## Adding a new element to the navigation bar + +Accepted files are Markdown (.md). + +Create a file with its extension and put it in the source directory. You can then link it to the toc-tree by putting +the filename without the extension in the [`_toctree.yml`](https://github.com/huggingface/lerobot/blob/main/docs/source/_toctree.yml) file. + +## Renaming section headers and moving sections + +It helps to keep the old links working when renaming the section header and/or moving sections from one document to another. This is because the old links are likely to be used in Issues, Forums, and Social media and it'd make for a much more superior user experience if users reading those months later could still easily navigate to the originally intended information. + +Therefore, we simply keep a little map of moved sections at the end of the document where the original section was. The key is to preserve the original anchor. + +So if you renamed a section from: "Section A" to "Section B", then you can add at the end of the file: + +``` +Sections that were moved: + +[ Section A ] +``` +and of course, if you moved it to another file, then: + +``` +Sections that were moved: + +[ Section A ] +``` + +Use the relative style to link to the new file so that the versioned docs continue to work. + +For an example of a rich moved sections set please see the very end of [the transformers Trainer doc](https://github.com/huggingface/transformers/blob/main/docs/source/en/main_classes/trainer.md). + +### Adding a new tutorial + +Adding a new tutorial or section is done in two steps: + +- Add a new file under `./source`. This file can either be ReStructuredText (.rst) or Markdown (.md). +- Link that file in `./source/_toctree.yml` on the correct toc-tree. + +Make sure to put your new file under the proper section. If you have a doubt, feel free to ask in a Github Issue or PR. + +### Writing source documentation + +Values that should be put in `code` should either be surrounded by backticks: \`like so\`. Note that argument names +and objects like True, None or any strings should usually be put in `code`. + +#### Writing a multi-line code block + +Multi-line code blocks can be useful for displaying examples. 
They are done between two lines of three backticks as usual in Markdown: + + +```` +``` +# first line of code +# second line +# etc +``` +```` + +#### Adding an image + +Due to the rapidly growing repository, it is important to make sure that no files that would significantly weigh down the repository are added. This includes images, videos, and other non-text files. We prefer to leverage a hf.co hosted `dataset` like +the ones hosted on [`hf-internal-testing`](https://huggingface.co/hf-internal-testing) in which to place these files and reference +them by URL. We recommend putting them in the following dataset: [huggingface/documentation-images](https://huggingface.co/datasets/huggingface/documentation-images). +If an external contribution, feel free to add the images to your PR and ask a Hugging Face member to migrate your images +to this dataset. diff --git a/docs/source/_toctree.yml b/docs/source/_toctree.yml new file mode 100644 index 0000000000..a0f69d0ac9 --- /dev/null +++ b/docs/source/_toctree.yml @@ -0,0 +1,12 @@ +- sections: + - local: index + title: LeRobot + - local: installation + title: Installation + title: Get started +- sections: + - local: assemble_so101 + title: Assemble SO-101 + - local: getting_started_real_world_robot + title: Getting Started with Real-World Robots + title: "Tutorials" diff --git a/docs/source/assemble_so101.mdx b/docs/source/assemble_so101.mdx new file mode 100644 index 0000000000..c6fad5a086 --- /dev/null +++ b/docs/source/assemble_so101.mdx @@ -0,0 +1,336 @@ +# Assemble SO-101 + +In the steps below we explain how to assemble our flagship robot, the SO-101. + +## Source the parts + +Follow this [README](https://github.com/TheRobotStudio/SO-ARM100). It contains the bill of materials, with a link to source the parts, as well as the instructions to 3D print the parts, +and advice if it's your first time printing or if you don't own a 3D printer. + +Before assembling, you will first need to configure your motors. To this end, we provide a nice script, so let's first install LeRobot. After configuration, we will also guide you through assembly. + +## Install LeRobot + +To install LeRobot follow our [Installation Guide](./installation) + +## Configure motors + +To configure the motors designate one bus servo adapter and 6 motors for your leader arm, and similarly the other bus servo adapter and 6 motors for the follower arm. It's convenient to label them and write on each motor if it's for the follower `F` or for the leader `L` and it's ID from 1 to 6. + +You now should plug the 5V or 12V power supply to the motor bus. 5V for the STS3215 7.4V motors and 12V for the STS3215 12V motors. Note that the leader arm always uses the 7.4V motors, so watch out that you plug in the right power supply if you have 12V and 7.4V motors, otherwise you might burn your motors! Now, connect the motor bus to your computer via USB. Note that the USB doesn't provide any power, and both the power supply and USB have to be plugged in. + +### Find the USB ports associated to each arm + +To find the port for each bus servo adapter, run this script: +```bash +python lerobot/scripts/find_motors_bus_port.py +``` +##### Example outputs of script + + + + +Example output leader arm's port: `/dev/tty.usbmodem575E0031751` + +```bash +Finding all available ports for the MotorBus. +['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751'] +Remove the usb cable from your MotorsBus and press Enter when done. + +[...Disconnect leader arm and press Enter...] 
+ +The port of this MotorsBus is /dev/tty.usbmodem575E0031751 +Reconnect the usb cable. +``` + +Example output follower arm port: `/dev/tty.usbmodem575E0032081` + +``` +Finding all available ports for the MotorBus. +['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751'] +Remove the usb cable from your MotorsBus and press Enter when done. + +[...Disconnect follower arm and press Enter...] + +The port of this MotorsBus is /dev/tty.usbmodem575E0032081 +Reconnect the usb cable. +``` + + + + +On Linux, you might need to give access to the USB ports by running: +```bash +sudo chmod 666 /dev/ttyACM0 +sudo chmod 666 /dev/ttyACM1 +``` + +Example output leader arm port: `/dev/ttyACM0` + +```bash +Finding all available ports for the MotorBus. +['/dev/ttyACM0', '/dev/ttyACM1'] +Remove the usb cable from your MotorsBus and press Enter when done. + +[...Disconnect leader arm and press Enter...] + +The port of this MotorsBus is /dev/ttyACM0 +Reconnect the usb cable. +``` + +Example output follower arm port: `/dev/ttyACM1` + +``` +Finding all available ports for the MotorBus. +['/dev/ttyACM0', '/dev/ttyACM1'] +Remove the usb cable from your MotorsBus and press Enter when done. + +[...Disconnect follower arm and press Enter...] + +The port of this MotorsBus is /dev/ttyACM1 +Reconnect the usb cable. +``` + + + +#### Update config file + +Now that you have your ports, update the **port** default values of [`SO101RobotConfig`](https://github.com/huggingface/lerobot/blob/main/lerobot/common/robot_devices/robots/configs.py). +You will find something like, update the `port` values with your actual motor ports: +```python +@RobotConfig.register_subclass("so101") +@dataclass +class So101RobotConfig(ManipulatorRobotConfig): + calibration_dir: str = ".cache/calibration/so101" + # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes. + # Set this to a positive scalar to have the same value for all motors, or a list that is the same length as + # the number of motors in your follower arms. + max_relative_target: int | None = None + + leader_arms: dict[str, MotorsBusConfig] = field( + default_factory=lambda: { + "main": FeetechMotorsBusConfig( + port="/dev/tty.usbmodem58760431091", <-- UPDATE HERE + motors={ + # name: (index, model) + "shoulder_pan": [1, "sts3215"], + "shoulder_lift": [2, "sts3215"], + "elbow_flex": [3, "sts3215"], + "wrist_flex": [4, "sts3215"], + "wrist_roll": [5, "sts3215"], + "gripper": [6, "sts3215"], + }, + ), + } + ) + + follower_arms: dict[str, MotorsBusConfig] = field( + default_factory=lambda: { + "main": FeetechMotorsBusConfig( + port="/dev/tty.usbmodem585A0076891", <-- UPDATE HERE + motors={ + # name: (index, model) + "shoulder_pan": [1, "sts3215"], + "shoulder_lift": [2, "sts3215"], + "elbow_flex": [3, "sts3215"], + "wrist_flex": [4, "sts3215"], + "wrist_roll": [5, "sts3215"], + "gripper": [6, "sts3215"], + }, + ), + } + ) +``` + +Here is a video of the process: + + +## Step-by-Step Assembly Instructions + +The follower arm uses 6x STS3215 motors with 1/345 gearing. The leader however uses three differently geared motors to make sure it can both sustain its own weight and it can be moved without requiring much force. Which motor is needed for which joint is shown in table below. 
+ +| Leader-Arm Axis | Motor | Gear Ratio | +|-----------------|:-------:|:----------:| +| Base / Shoulder Yaw | 1 | 1 / 191 | +| Shoulder Pitch | 2 | 1 / 345 | +| Elbow | 3 | 1 / 191 | +| Wrist Roll | 4 | 1 / 147 | +| Wrist Pitch | 5 | 1 / 147 | +| Gripper | 6 | 1 / 147 | + +### Set motor IDs + +Plug your motor in one of the two ports of the motor bus and run this script to set its ID to 1. Replace the text after --port to the corresponding control board port. +```bash +python lerobot/scripts/configure_motor.py \ + --port /dev/tty.usbmodem58760432961 \ + --brand feetech \ + --model sts3215 \ + --baudrate 1000000 \ + --ID 1 +``` + +Then unplug your motor and plug the second motor and set its ID to 2. +```bash +python lerobot/scripts/configure_motor.py \ + --port /dev/tty.usbmodem58760432961 \ + --brand feetech \ + --model sts3215 \ + --baudrate 1000000 \ + --ID 2 +``` + +Redo this process for all your motors until ID 6. Do the same for the 6 motors of the leader arm, but make sure to change the power supply if you use motors with different voltage and make sure you give the right ID to the right motor according to the table above. + +Here is a video of the process: + + +### Clean Parts +Remove all support material from the 3D-printed parts, the easiest wat to do this is using a small screwdriver to get underneath the support material. + +### Joint 1 + +- Place the first motor into the base. +- Fasten the motor with 4 M2x6mm screws (smallest screws). Two from the top and two from bottom. +- Slide over the first motor holder and fasten it using two M2x6mm screws (one on each side). +- Install both motor horns, securing the top horn with a M3x6mm screw. +- Attach the shoulder part. +- Tighten the shoulder part with 4 M3x6mm screws on top and 4 M3x6mm screws on the bottom +- Add the shoulder motor holder. + + + +### Joint 2 + +- Slide the second motor in from the top. +- Fasten the second motor with 4 M2x6mm screws. +- Attach both motor horns to motor 2, again use the M3x6mm horn screw. +- Attach the upper arm with 4 M3x6mm screws on each side. + + + +### Joint 3 + +- Insert motor 3 and fasten using 4 M2x6mm screws +- Attach both motor horns to motor 3 and secure one again with a M3x6mm horn screw. +- Connect the forearm to motor 3 using 4 M3x6mm screws on each side. + + + +### Joint 4 + +- Slide over motor holder 4. +- Slide in motor 4. +- Fasten motor 4 with 4 M2x6mm screws and attach its motor horns, use a M3x6mm horn screw. + + + +### Joint 5 + +- Insert motor 5 into the wrist holder and secure it with 2 M2x6mm front screws. +- Install only one motor horn on the wrist motor and secure it with a M3x6mm horn screw. +- Secure the wrist to motor 4 using 4 M3x6mm screws on both sides. + + + +### Gripper / Handle + + + + +- Attach the gripper to motor 5, attach it to the motor horn on the wrist using 4 M3x6mm screws. +- Insert the gripper motor and secure it with 2 M2x6mm screws on each side. +- Attach the motor horns and again use a M3x6mm horn screw. +- Install the gripper claw and secure it with 4 M3x6mm screws on both sides. + + + + + + +- Mount the leader holder onto the wrist and secure it with 4 M3x6mm screws. +- Attach the handle to motor 5 using 1 M2x6mm screw. +- Insert the gripper motor, secure it with 2 M2x6mm screws on each side, attach a motor horn using a M3x6mm horn screw. +- Attach the follower trigger with 4 M3x6mm screws. + + + + + + +##### Wiring + +- Attach the motor controller on the back. 
+
+- Then insert all wires, use the wire guides everywhere to make sure the wires don't unplug themself and stay in place.
+
+
+
+## Calibrate
+
+Next, you'll need to calibrate your SO-101 robot to ensure that the leader and follower arms have the same position values when they are in the same physical position.
+The calibration process is very important because it allows a neural network trained on one SO-101 robot to work on another.
+
+#### Manual calibration of follower arm
+
+You will need to move the follower arm to these positions sequentially. Note that the rotated position is on the right side of the robot and that you have to open the gripper fully.
+
+| 1. Middle position | 2. Zero position | 3. Rotated position | 4. Rest position |
+| ------------ | ------------ | ------------ | ------------ |
+| SO-101 follower arm middle position | SO-101 follower arm zero position | SO-101 follower arm rotated position | SO-101 follower arm rest position |
+
+Make sure both arms are connected and run this script to launch manual calibration:
+```bash
+python lerobot/scripts/control_robot.py \
+  --robot.type=so101 \
+  --robot.cameras='{}' \
+  --control.type=calibrate \
+  --control.arms='["main_follower"]'
+```
+
+#### Manual calibration of leader arm
+You will also need to move the leader arm to these positions sequentially:
+
+| 1. Middle position | 2. Zero position | 3. Rotated position | 4. Rest position |
+| ------------ | ------------ | ------------ | ------------ |
+| SO-101 leader arm middle position | SO-101 leader arm zero position | SO-101 leader arm rotated position | SO-101 leader arm rest position |
+
+Run this script to launch manual calibration:
+```bash
+python lerobot/scripts/control_robot.py \
+  --robot.type=so101 \
+  --robot.cameras='{}' \
+  --control.type=calibrate \
+  --control.arms='["main_leader"]'
+```
+
+Congrats 🎉, your robot is all set to learn a task on its own. Start training it by following this tutorial: [Getting started with real-world robots](./getting_started_real_world_robot)
diff --git a/docs/source/getting_started_real_world_robot.mdx b/docs/source/getting_started_real_world_robot.mdx
new file mode 100644
index 0000000000..f580b9fe02
--- /dev/null
+++ b/docs/source/getting_started_real_world_robot.mdx
@@ -0,0 +1,370 @@
+# Getting Started with Real-World Robots
+
+This tutorial will explain how to train a neural network to autonomously control a real robot.
+
+**You'll learn:**
+1. How to record and visualize your dataset.
+2. How to train a policy using your data and prepare it for evaluation.
+3. How to evaluate your policy and visualize the results.
+
+By following these steps, you'll be able to replicate tasks like picking up a Lego block and placing it in a bin with a high success rate, as demonstrated in [this video](https://x.com/RemiCadene/status/1814680760592572934).
+
+This tutorial is specifically made for the affordable [SO-101](https://github.com/TheRobotStudio/SO-ARM100) robot, but it contains additional information so that it can be easily adapted to various types of robots, like the [Aloha bimanual robot](https://aloha-2.github.io), by changing some configurations. The SO-101 consists of a leader arm and a follower arm, each with 6 motors. It can work with one or several cameras to record the scene, which serve as visual sensors for the robot.
+
+During the data collection phase, you will control the follower arm by moving the leader arm. This process is known as "teleoperation." This technique is used to collect robot trajectories. Afterward, you'll train a neural network to imitate these trajectories and deploy the network to enable your robot to operate autonomously.
+
+If you encounter any issues at any step of the tutorial, feel free to seek help on [Discord](https://discord.com/invite/s3KuuzsPFb) or don't hesitate to iterate with us on the tutorial by creating issues or pull requests.
+
+## Setup and Calibrate
+
+If you haven't yet set up and calibrated the SO-101, follow these steps:
+1. [Find ports and update config file](./assemble_so101#find-the-usb-ports-associated-to-each-arm)
+2. [Calibrate](./assemble_so101#calibrate)
+
+## Teleoperate
+
+Run this simple script to teleoperate your robot (it won't connect to or display the cameras):
+```bash
+python lerobot/scripts/control_robot.py \
+  --robot.type=so101 \
+  --robot.cameras='{}' \
+  --control.type=teleoperate
+```
+
+The teleoperate command will automatically:
+1. Identify any missing calibrations and initiate the calibration procedure.
+2. Connect the robot and start teleoperation.
+
+## Setup Cameras
+
+To connect a camera you have three options:
+1. OpenCVCamera, which allows you to use most cameras: USB, RealSense, laptop webcam
+2. iPhone camera with MacOS
+3. Phone camera on Linux
+
+### Use OpenCVCamera
+
+The [`OpenCVCamera`](../lerobot/common/robot_devices/cameras/opencv.py) class allows you to efficiently record frames from most cameras using the [`opencv2`](https://docs.opencv.org) library. For more details on compatibility, see [Video I/O with OpenCV Overview](https://docs.opencv.org/4.x/d0/da7/videoio_overview.html).
+
+To instantiate an [`OpenCVCamera`](../lerobot/common/robot_devices/cameras/opencv.py), you need a camera index (e.g. `OpenCVCamera(camera_index=0)`). When you only have one camera, like a laptop webcam, the camera index is usually `0`, but it might differ, and it might change if you reboot your computer or re-plug your camera. This behavior depends on your operating system.
+
+To find the camera indices, run the following utility script, which will save a few frames from each detected camera:
+```bash
+python lerobot/common/robot_devices/cameras/opencv.py \
+    --images-dir outputs/images_from_opencv_cameras
+```
+
+The output will look something like this if you have two cameras connected:
+```
+Mac or Windows detected. Finding available camera indices through scanning all indices from 0 to 60
+[...]
+Camera found at index 0
+Camera found at index 1
+[...]
+
+Connecting cameras
+OpenCVCamera(0, fps=30.0, width=1920.0, height=1080.0, color_mode=rgb)
+OpenCVCamera(1, fps=24.0, width=1920.0, height=1080.0, color_mode=rgb)
+Saving images to outputs/images_from_opencv_cameras
+Frame: 0000 Latency (ms): 39.52
+[...]
+Frame: 0046 Latency (ms): 40.07
+Images have been saved to outputs/images_from_opencv_cameras
+```
+
+Check the saved images in `outputs/images_from_opencv_cameras` to identify which camera index corresponds to which physical camera (e.g. `0` for `camera_00` or `1` for `camera_01`):
+```
+camera_00_frame_000000.png
+[...]
+camera_00_frame_000047.png
+camera_01_frame_000000.png
+[...]
+camera_01_frame_000047.png
+```
+
+Note: Some cameras may take a few seconds to warm up, and the first frame might be black or green.
+
+Now that you have the camera indexes, you should specify the cameras in the config. TODO(pepijn): add more info about setting camera config, rotate etc..
+
+### Use your phone
+
+
+
+To use your iPhone as a camera on macOS, enable the Continuity Camera feature:
+- Ensure your Mac is running macOS 13 or later, and your iPhone is on iOS 16 or later.
+- Sign in both devices with the same Apple ID.
+- Connect your devices with a USB cable or turn on Wi-Fi and Bluetooth for a wireless connection.
+
+For more details, visit [Apple support](https://support.apple.com/en-gb/guide/mac-help/mchl77879b8a/mac).
+
+Your iPhone should be detected automatically when running the camera setup script in the next section.
+
+
+
+
+If you want to use your phone as a camera on Linux, follow these steps to set up a virtual camera:
+
+1. *Install `v4l2loopback-dkms` and `v4l-utils`*. Those packages are required to create virtual camera devices (`v4l2loopback`) and verify their settings with the `v4l2-ctl` utility from `v4l-utils`. Install them using:
+```bash
+sudo apt install v4l2loopback-dkms v4l-utils
+```
+2. *Install [DroidCam](https://droidcam.app) on your phone*. This app is available for both iOS and Android.
+3. *Install [OBS Studio](https://obsproject.com)*. This software will help you manage the camera feed. Install it using [Flatpak](https://flatpak.org):
+```bash
+flatpak install flathub com.obsproject.Studio
+```
+4. *Install the DroidCam OBS plugin*. This plugin integrates DroidCam with OBS Studio. Install it with:
+```bash
+flatpak install flathub com.obsproject.Studio.Plugin.DroidCam
+```
+5. *Start OBS Studio*. Launch with:
+```bash
+flatpak run com.obsproject.Studio
+```
+6. *Add your phone as a source*. Follow the instructions [here](https://droidcam.app/obs/usage). Be sure to set the resolution to `640x480`.
+7. *Adjust resolution settings*. In OBS Studio, go to `File > Settings > Video`. Change the `Base(Canvas) Resolution` and the `Output(Scaled) Resolution` to `640x480` by manually typing it in.
+8. *Start virtual camera*. In OBS Studio, follow the instructions [here](https://obsproject.com/kb/virtual-camera-guide).
+9. *Verify the virtual camera setup*. Use `v4l2-ctl` to list the devices:
+```bash
+v4l2-ctl --list-devices
+```
+You should see an entry like:
+```
+VirtualCam (platform:v4l2loopback-000):
+/dev/video1
+```
+10. *Check the camera resolution*. Use `v4l2-ctl` to ensure that the virtual camera output resolution is `640x480`. Change `/dev/video1` to the port of your virtual camera from the output of `v4l2-ctl --list-devices`.
+```bash
+v4l2-ctl -d /dev/video1 --get-fmt-video
+```
+You should see an entry like:
+```
+>>> Format Video Capture:
+>>> Width/Height : 640/480
+>>> Pixel Format : 'YUYV' (YUYV 4:2:2)
+```
+
+Troubleshooting: If the resolution is not correct, you will have to delete the Virtual Camera port and try again, as it cannot be changed.
+
+If everything is set up correctly, you can proceed with the rest of the tutorial.
+
+
+
+
+
+## Teleoperate with cameras
+
+We can now teleoperate again while at the same time visualzing the camera's and joint positions with `rerun`.
+
+```bash
+python lerobot/scripts/control_robot.py \
+  --robot.type=so101 \
+  --control.type=teleoperate \
+  --control.display_data=true
+```
+
+## Record a dataset
+
+Once you're familiar with teleoperation, you can record your first dataset with SO-101.
+
+We use the Hugging Face hub features for uploading your dataset. If you haven't previously used the Hub, make sure you've can login via the cli using a write-access token, this token can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens).
+
+Add your token to the cli by running this command:
+```bash
+huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential
+```
+
+Then store your Hugging Face repository name in a variable:
+```bash
+HF_USER=$(huggingface-cli whoami | head -n 1)
+echo $HF_USER
+```
+
+Now you can record a dataset. To record 2 episodes and upload your dataset to the hub, execute this command:
+```bash
+python lerobot/scripts/control_robot.py \
+  --robot.type=so101 \
+  --control.type=record \
+  --control.fps=30 \
+  --control.single_task="Grasp a lego block and put it in the bin." \
+  --control.repo_id=${HF_USER}/so101_test \
+  --control.tags='["so101","tutorial"]' \
+  --control.warmup_time_s=5 \
+  --control.episode_time_s=30 \
+  --control.reset_time_s=30 \
+  --control.num_episodes=2 \
+  --control.push_to_hub=true
+```
+
+You will see a lot of lines appearing like this one:
+```
+INFO 2024-08-10 15:02:58 ol_robot.py:219 dt:33.34 (30.0hz) dtRlead: 5.06 (197.5hz) dtWfoll: 0.25 (3963.7hz) dtRfoll: 6.22 (160.7hz) dtRlaptop: 32.57 (30.7hz) dtRphone: 33.84 (29.5hz)
+```
+
+| Field | Meaning |
+|:---|:---|
+| `2024-08-10 15:02:58` | Timestamp when `print` was called. |
+| `ol_robot.py:219` | Source file and line number of the `print` call (`lerobot/scripts/control_robot.py` at line `219`). |
+| `dt: 33.34 (30.0 Hz)` | Delta time (ms) between teleop steps (target: 30.0 Hz, `--fps 30`). Yellow if step is too slow. |
+| `dtRlead: 5.06 (197.5 Hz)` | Delta time (ms) for reading present position from the **leader arm**. |
+| `dtWfoll: 0.25 (3963.7 Hz)` | Delta time (ms) for writing goal position to the **follower arm** (asynchronous). |
+| `dtRfoll: 6.22 (160.7 Hz)` | Delta time (ms) for reading present position from the **follower arm**. |
+| `dtRlaptop: 32.57 (30.7 Hz)` | Delta time (ms) for capturing an image from the **laptop camera** (async thread). |
+| `dtRphone: 33.84 (29.5 Hz)` | Delta time (ms) for capturing an image from the **phone camera** (async thread). |
+
+
+#### Dataset upload
+Locally your dataset is stored in this folder: `~/.cache/huggingface/lerobot/{repo-id}` (e.g. `data/cadene/so101_test`). At the end of data recording, your dataset will be uploaded on your Hugging Face page (e.g.
https://huggingface.co/datasets/cadene/so101_test) that you can obtain by running: +```bash +echo https://huggingface.co/datasets/${HF_USER}/so101_test +``` +Your dataset will be automatically tagged with `LeRobot` for the community to find it easily, and you can also add custom tags (in this case `tutorial` for example). + +You can look for other LeRobot datasets on the hub by searching for `LeRobot` [tags](https://huggingface.co/datasets?other=LeRobot). + +#### Record function + +The `record` function provides a suite of tools for capturing and managing data during robot operation: + +##### 1. Frame Capture and Video Encoding +- Frames from cameras are saved to disk during recording. +- At the end of each episode, frames are encoded into video files. + +##### 2. Data Storage +- Data is stored using the `LeRobotDataset` format. +- By default, the dataset is pushed to your Hugging Face page. + - To disable uploading, use `--control.push_to_hub=false`. + +##### 3. Checkpointing and Resuming +- Checkpoints are automatically created during recording. +- If an issue occurs, you can resume by re-running the same command with `--control.resume=true`. +- To start recording from scratch, **manually delete** the dataset directory. + +##### 4. Recording Parameters +Set the flow of data recording using command-line arguments: +- `--control.warmup_time_s=10` + Number of seconds before starting data collection (default: **10 seconds**). + Allows devices to warm up and synchronize. +- `--control.episode_time_s=60` + Duration of each data recording episode (default: **60 seconds**). +- `--control.reset_time_s=60` + Duration for resetting the environment after each episode (default: **60 seconds**). +- `--control.num_episodes=50` + Total number of episodes to record (default: **50**). + +##### 5. Keyboard Controls During Recording +Control the data recording flow using keyboard shortcuts: +- Press **Right Arrow (`→`)**: Early stop the current episode or reset time and move to the next. +- Press **Left Arrow (`←`)**: Cancel the current episode and re-record it. +- Press **Escape (`ESC`)**: Immediately stop the session, encode videos, and upload the dataset. + +#### Tips for gathering data + +Once you're comfortable with data recording, you can create a larger dataset for training. A good starting task is grasping an object at different locations and placing it in a bin. We suggest recording at least 50 episodes, with 10 episodes per location. Keep the cameras fixed and maintain consistent grasping behavior throughout the recordings. Also make sure the object you are manipulating is visible on the camera's. A good rule of thumb is you should be able to do the task yourself by only looking at the camera images. + +In the following sections, you’ll train your neural network. After achieving reliable grasping performance, you can start introducing more variations during data collection, such as additional grasp locations, different grasping techniques, and altering camera positions. + +Avoid adding too much variation too quickly, as it may hinder your results. + + +#### Troubleshooting: +- On Linux, if the left and right arrow keys and escape key don't have any effect during data recording, make sure you've set the `$DISPLAY` environment variable. See [pynput limitations](https://pynput.readthedocs.io/en/latest/limitations.html#linux). 
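+
+Before moving on to visualization, you can also sanity-check the recorded data programmatically. Below is a minimal sketch that loads the dataset with `LeRobotDataset` and prints a few statistics. It assumes the dataset was pushed to the hub (or is cached locally) under the repo id used above; the exact attribute names may differ slightly depending on your installed version:
+
+```python
+from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
+
+# Replace with your own repo id, i.e. the value of ${HF_USER}/so101_test used above.
+repo_id = "your_hf_username/so101_test"
+
+dataset = LeRobotDataset(repo_id)
+
+# Basic checks on what was recorded.
+print(f"Episodes: {dataset.num_episodes}")
+print(f"Frames: {dataset.num_frames}")
+print(f"FPS: {dataset.fps}")
+
+# A single frame is a dict with motor states, actions and camera images.
+frame = dataset[0]
+for key, value in frame.items():
+    print(key, getattr(value, "shape", value))
+```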
+
+## Visualize a dataset
+
+If you uploaded your dataset to the hub with `--control.push_to_hub=true`, you can [visualize your dataset online](https://huggingface.co/spaces/lerobot/visualize_dataset) by copy pasting your repo id given by:
+```bash
+echo ${HF_USER}/so101_test
+```
+
+If you didn't upload your dataset (i.e. you used `--control.push_to_hub=false`), you can still visualize it locally in your browser at `http://127.0.0.1:9090` with the visualization tool:
+```bash
+python lerobot/scripts/visualize_dataset_html.py \
+  --repo-id ${HF_USER}/so101_test \
+  --local-files-only 1
+```
+
+This will launch a local web server that looks like this:
+
+ Koch v1.1 leader and follower arms +
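+
+If you prefer a quick look without the web server, a small Python sketch like the one below can dump the frames of a single episode from one camera to disk. The camera key (`observation.images.laptop`) and the frame tensor layout (`(c, h, w)` floats in `[0, 1]`) are assumptions here and may need adjusting to match your own dataset:
+
+```python
+from pathlib import Path
+
+from torchvision.utils import save_image
+
+from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
+
+repo_id = "your_hf_username/so101_test"  # replace with your repo id
+camera_key = "observation.images.laptop"  # adjust to one of your camera keys
+
+dataset = LeRobotDataset(repo_id)
+
+# episode_data_index stores the first and last frame index of each episode.
+# Note: depending on the version, "to" may be inclusive or exclusive.
+episode_index = 0
+start = dataset.episode_data_index["from"][episode_index].item()
+end = dataset.episode_data_index["to"][episode_index].item()
+
+out_dir = Path(f"outputs/episode_{episode_index:03d}_frames")
+out_dir.mkdir(parents=True, exist_ok=True)
+
+for i in range(start, end):
+    frame = dataset[i][camera_key]  # expected shape: (c, h, w), values in [0, 1]
+    save_image(frame, out_dir / f"frame_{i - start:06d}.png")
+```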
+ +## Replay an episode + +A useful feature is the `replay` function, which allows to replay on your robot any episode that you've recorded or episodes from any dataset out there. This function helps you test the repeatability of your robot's actions and assess transferability across robots of the same model. + +You can replay the first episode on your robot with: +```bash +python lerobot/scripts/control_robot.py \ + --robot.type=so101 \ + --control.type=replay \ + --control.fps=30 \ + --control.repo_id=${HF_USER}/so101_test \ + --control.episode=0 +``` + +Your robot should replicate movements similar to those you recorded. For example, check out [this video](https://x.com/RemiCadene/status/1793654950905680090) where we use `replay` on a Aloha robot from [Trossen Robotics](https://www.trossenrobotics.com). + +## Train a policy + +To train a policy to control your robot, use the [`python lerobot/scripts/train.py`](../lerobot/scripts/train.py) script. A few arguments are required. Here is an example command: +```bash +python lerobot/scripts/train.py \ + --dataset.repo_id=${HF_USER}/so101_test \ + --policy.type=act \ + --output_dir=outputs/train/act_so101_test \ + --job_name=act_so101_test \ + --policy.device=cuda \ + --wandb.enable=true +``` + +Let's explain the command: +1. We provided the dataset as argument with `--dataset.repo_id=${HF_USER}/so101_test`. +2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](../lerobot/common/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor sates, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset. +4. We provided `policy.device=cuda` since we are training on a Nvidia GPU, but you could use `policy.device=mps` to train on Apple silicon. +5. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`. + +Training should take several hours. You will find checkpoints in `outputs/train/act_so101_test/checkpoints`. + +To resume training from a checkpoint, below is an example command to resume from `last` checkpoint of the `act_so101_test` policy: +```bash +python lerobot/scripts/train.py \ + --config_path=outputs/train/act_so101_test/checkpoints/last/pretrained_model/train_config.json \ + --resume=true +``` + +#### Upload policy checkpoints + +Once training is done, upload the latest checkpoint with: +```bash +huggingface-cli upload ${HF_USER}/act_so101_test \ + outputs/train/act_so101_test/checkpoints/last/pretrained_model +``` + +You can also upload intermediate checkpoints with: +```bash +CKPT=010000 +huggingface-cli upload ${HF_USER}/act_so101_test${CKPT} \ + outputs/train/act_so101_test/checkpoints/${CKPT}/pretrained_model +``` + +## Evaluate your policy + +You can use the `record` function from [`lerobot/scripts/control_robot.py`](../lerobot/scripts/control_robot.py) but with a policy checkpoint as input. For instance, run this command to record 10 evaluation episodes: +```bash +python lerobot/scripts/control_robot.py \ + --robot.type=so101 \ + --control.type=record \ + --control.fps=30 \ + --control.single_task="Grasp a lego block and put it in the bin." 
\ + --control.repo_id=${HF_USER}/eval_act_so101_test \ + --control.tags='["tutorial"]' \ + --control.warmup_time_s=5 \ + --control.episode_time_s=30 \ + --control.reset_time_s=30 \ + --control.num_episodes=10 \ + --control.push_to_hub=true \ + --control.policy.path=outputs/train/act_so101_test/checkpoints/last/pretrained_model +``` + +As you can see, it's almost the same command as previously used to record your training dataset. Two things changed: +1. There is an additional `--control.policy.path` argument which indicates the path to your policy checkpoint with (e.g. `outputs/train/eval_act_so101_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `${HF_USER}/act_so101_test`). +2. The name of dataset begins by `eval` to reflect that you are running inference (e.g. `${HF_USER}/eval_act_so101_test`). diff --git a/docs/source/index.mdx b/docs/source/index.mdx new file mode 100644 index 0000000000..dba71474e2 --- /dev/null +++ b/docs/source/index.mdx @@ -0,0 +1,19 @@ + + +# LeRobot + +**State-of-the-art machine learning for real-world robotics** + +🤗 LeRobot aims to provide models, datasets, and tools for real-world robotics in PyTorch. The goal is to lower the barrier for entry to robotics so that everyone can contribute and benefit from sharing datasets and pretrained models. + +🤗 LeRobot contains state-of-the-art approaches that have been shown to transfer to the real-world with a focus on imitation learning and reinforcement learning: github.com/huggingface/lerobot + +🤗 LeRobot already provides a set of pretrained models, datasets with human collected demonstrations, and simulated environments so that everyone can get started. In the coming weeks, the plan is to add more and more support for real-world robotics on the most affordable and capable robots out there. + +🤗 LeRobot hosts pretrained models and datasets on this HuggingFace community page: huggingface.co/lerobot + +Join the LeRobot community on [Discord](https://discord.gg/s3KuuzsPFb) diff --git a/docs/source/installation.mdx b/docs/source/installation.mdx new file mode 100644 index 0000000000..3823d30e7c --- /dev/null +++ b/docs/source/installation.mdx @@ -0,0 +1,84 @@ +# Installation + +## Install LeRobot + +Download our source code: +```bash +git clone https://github.com/huggingface/lerobot.git +cd lerobot +``` + +Create a virtual environment with Python 3.10, using [`Miniconda`](https://docs.anaconda.com/miniconda/install/#quick-command-line-install) +```bash +conda create -y -n lerobot python=3.10 +``` + +Now restart the shell by running: + + + +```bash +source ~/.bashrc +``` + + + +```bash +source ~/.bash_profile +``` + + + +```bash +source ~/.zshrc +``` + + + +Then activate your conda environment, you have to do this each time you open a shell to use lerobot: +```bash +conda activate lerobot +``` + +When using `miniconda`, install `ffmpeg` in your environment: +```bash +conda install ffmpeg -c conda-forge +``` + +> [!TIP] +> This usually installs `ffmpeg 7.X` for your platform compiled with the `libsvtav1` encoder. 
If `libsvtav1` is not supported (check supported encoders with `ffmpeg -encoders`), you can: +> - _[On any platform]_ Explicitly install `ffmpeg 7.X` using: +> ```bash +> conda install ffmpeg=7.1.1 -c conda-forge +> ``` +> - _[On Linux only]_ Install [ffmpeg build dependencies](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu#GettheDependencies) and [compile ffmpeg from source with libsvtav1](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu#libsvtav1), and make sure you use the corresponding ffmpeg binary to your install with `which ffmpeg`. + +Install 🤗 LeRobot: +```bash +cd lerobot && pip install ".[feetech]" +``` + +## Troubleshooting +If you encounter build errors, you may need to install additional dependencies: `cmake`, `build-essential`, and `ffmpeg libs`. +To install these for linux run: +```bash +sudo apt-get install cmake build-essential python-dev pkg-config libavformat-dev libavcodec-dev libavdevice-dev libavutil-dev libswscale-dev libswresample-dev libavfilter-dev pkg-config +``` +For other systems, see: [Compiling PyAV](https://pyav.org/docs/develop/overview/installation.html#bring-your-own-ffmpeg) + +## Sim +For simulations, 🤗 LeRobot comes with gymnasium environments that can be installed as extras: +- [aloha](https://github.com/huggingface/gym-aloha) +- [xarm](https://github.com/huggingface/gym-xarm) +- [pusht](https://github.com/huggingface/gym-pusht) + +For instance, to install 🤗 LeRobot with aloha and pusht, use: +```bash +pip install -e ".[aloha, pusht]" +``` + +## W&B +To use [Weights and Biases](https://docs.wandb.ai/quickstart) for experiment tracking, log in with +```bash +wandb login +``` diff --git a/pyproject.toml b/pyproject.toml index 72047a4fbf..70c8298218 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -77,6 +77,7 @@ dependencies = [ [project.optional-dependencies] aloha = ["gym-aloha>=0.1.1 ; python_version < '4.0'"] +docs = ["hf-doc-builder @ git+https://github.com/huggingface/doc-builder.git@main", "watchdog >= 6.0.0"] dev = ["pre-commit>=3.7.0", "debugpy>=1.8.1"] dora = [ "gym-dora @ git+https://github.com/dora-rs/dora-lerobot.git#subdirectory=gym_dora ; python_version < '4.0'", From 60b5a21294337ddb65141fd1d87972805dce7318 Mon Sep 17 00:00:00 2001 From: Pepijn <138571049+pkooij@users.noreply.github.com> Date: Fri, 2 May 2025 16:10:13 +0200 Subject: [PATCH 10/88] Docs: adapt text + fix video code (#1064) --- docs/source/assemble_so101.mdx | 90 +++++++++++++++++++--------------- docs/source/index.mdx | 6 +-- 2 files changed, 53 insertions(+), 43 deletions(-) diff --git a/docs/source/assemble_so101.mdx b/docs/source/assemble_so101.mdx index c6fad5a086..3aceb6b929 100644 --- a/docs/source/assemble_so101.mdx +++ b/docs/source/assemble_so101.mdx @@ -143,10 +143,11 @@ class So101RobotConfig(ManipulatorRobotConfig): ``` Here is a video of the process: - +
+ +
## Step-by-Step Assembly Instructions @@ -186,10 +187,11 @@ python lerobot/scripts/configure_motor.py \ Redo this process for all your motors until ID 6. Do the same for the 6 motors of the leader arm, but make sure to change the power supply if you use motors with different voltage and make sure you give the right ID to the right motor according to the table above. Here is a video of the process: - +
+ +
### Clean Parts Remove all support material from the 3D-printed parts, the easiest wat to do this is using a small screwdriver to get underneath the support material. @@ -204,10 +206,11 @@ Remove all support material from the 3D-printed parts, the easiest wat to do thi - Tighten the shoulder part with 4 M3x6mm screws on top and 4 M3x6mm screws on the bottom - Add the shoulder motor holder. - +
+ +
### Joint 2 @@ -216,10 +219,11 @@ Remove all support material from the 3D-printed parts, the easiest wat to do thi - Attach both motor horns to motor 2, again use the M3x6mm horn screw. - Attach the upper arm with 4 M3x6mm screws on each side. - +
+ +
### Joint 3 @@ -227,10 +231,11 @@ Remove all support material from the 3D-printed parts, the easiest wat to do thi - Attach both motor horns to motor 3 and secure one again with a M3x6mm horn screw. - Connect the forearm to motor 3 using 4 M3x6mm screws on each side. - +
+ +
### Joint 4 @@ -238,10 +243,11 @@ Remove all support material from the 3D-printed parts, the easiest wat to do thi - Slide in motor 4. - Fasten motor 4 with 4 M2x6mm screws and attach its motor horns, use a M3x6mm horn screw. - +
+ +
### Joint 5 @@ -249,10 +255,11 @@ Remove all support material from the 3D-printed parts, the easiest wat to do thi - Install only one motor horn on the wrist motor and secure it with a M3x6mm horn screw. - Secure the wrist to motor 4 using 4 M3x6mm screws on both sides. - +
+ +
### Gripper / Handle @@ -264,10 +271,11 @@ Remove all support material from the 3D-printed parts, the easiest wat to do thi - Attach the motor horns and again use a M3x6mm horn screw. - Install the gripper claw and secure it with 4 M3x6mm screws on both sides. - +
+ +
@@ -277,10 +285,11 @@ Remove all support material from the 3D-printed parts, the easiest wat to do thi - Insert the gripper motor, secure it with 2 M2x6mm screws on each side, attach a motor horn using a M3x6mm horn screw. - Attach the follower trigger with 4 M3x6mm screws. - +
+ +
@@ -290,10 +299,11 @@ Remove all support material from the 3D-printed parts, the easiest wat to do thi - Attach the motor controller on the back. - Then insert all wires, use the wire guides everywhere to make sure the wires don't unplug themself and stay in place. - +
+ +
## Calibrate diff --git a/docs/source/index.mdx b/docs/source/index.mdx index dba71474e2..b8ff56ea77 100644 --- a/docs/source/index.mdx +++ b/docs/source/index.mdx @@ -10,10 +10,10 @@ 🤗 LeRobot aims to provide models, datasets, and tools for real-world robotics in PyTorch. The goal is to lower the barrier for entry to robotics so that everyone can contribute and benefit from sharing datasets and pretrained models. -🤗 LeRobot contains state-of-the-art approaches that have been shown to transfer to the real-world with a focus on imitation learning and reinforcement learning: github.com/huggingface/lerobot +🤗 LeRobot contains state-of-the-art approaches that have been shown to transfer to the real-world with a focus on imitation learning and reinforcement learning. -🤗 LeRobot already provides a set of pretrained models, datasets with human collected demonstrations, and simulated environments so that everyone can get started. In the coming weeks, the plan is to add more and more support for real-world robotics on the most affordable and capable robots out there. +🤗 LeRobot already provides a set of pretrained models, datasets with human collected demonstrations, and simulated environments so that everyone can get started. -🤗 LeRobot hosts pretrained models and datasets on this HuggingFace community page: huggingface.co/lerobot +🤗 LeRobot hosts pretrained models and datasets on the LeRobot HuggingFace page. Join the LeRobot community on [Discord](https://discord.gg/s3KuuzsPFb) From 9d59f12043b17d1a413123cdd7f55e472042f2fa Mon Sep 17 00:00:00 2001 From: omahs <73983677+omahs@users.noreply.github.com> Date: Mon, 5 May 2025 10:35:32 +0200 Subject: [PATCH 11/88] Fix typos (#1070) --- README.md | 6 +++--- docs/source/assemble_so101.mdx | 2 +- docs/source/getting_started_real_world_robot.mdx | 6 +++--- examples/10_use_so100.md | 2 +- examples/11_use_lekiwi.md | 4 ++-- examples/11_use_moss.md | 6 +++--- examples/12_use_so101.md | 6 +++--- examples/2_evaluate_pretrained_policy.py | 4 ++-- examples/3_train_policy.py | 2 +- examples/4_train_policy_with_script.md | 8 ++++---- examples/7_get_started_with_real_robot.md | 6 +++--- examples/8_use_stretch.md | 2 +- examples/9_use_aloha.md | 2 +- examples/advanced/2_calculate_validation_loss.py | 2 +- lerobot/common/datasets/factory.py | 2 +- lerobot/common/datasets/transforms.py | 2 +- lerobot/scripts/eval.py | 4 ++-- 17 files changed, 33 insertions(+), 33 deletions(-) diff --git a/README.md b/README.md index 946693350d..dd3dc8c73b 100644 --- a/README.md +++ b/README.md @@ -221,7 +221,7 @@ dataset attributes: │ ├ episode_index (int64): index of the episode for this sample │ ├ frame_index (int64): index of the frame for this sample in the episode ; starts at 0 for each episode │ ├ timestamp (float32): timestamp in the episode - │ ├ next.done (bool): indicates the end of en episode ; True for the last frame in each episode + │ ├ next.done (bool): indicates the end of an episode ; True for the last frame in each episode │ └ index (int64): general index in the whole dataset ├ episode_data_index: contains 2 tensors with the start and end indices of each episode │ ├ from (1D int64 tensor): first frame index for each episode — shape (num episodes,) starts with 0 @@ -270,7 +270,7 @@ See `python lerobot/scripts/eval.py --help` for more instructions. 
### Train your own policy -Check out [example 3](./examples/3_train_policy.py) that illustrate how to train a model using our core library in python, and [example 4](./examples/4_train_policy_with_script.md) that shows how to use our training script from command line. +Check out [example 3](./examples/3_train_policy.py) that illustrates how to train a model using our core library in python, and [example 4](./examples/4_train_policy_with_script.md) that shows how to use our training script from command line. To use wandb for logging training and evaluation curves, make sure you've run `wandb login` as a one-time setup step. Then, when running the training command above, enable WandB in the configuration by adding `--wandb.enable=true`. @@ -321,7 +321,7 @@ Once you have trained a policy you may upload it to the Hugging Face hub using a You first need to find the checkpoint folder located inside your experiment directory (e.g. `outputs/train/2024-05-05/20-21-12_aloha_act_default/checkpoints/002500`). Within that there is a `pretrained_model` directory which should contain: - `config.json`: A serialized version of the policy configuration (following the policy's dataclass config). - `model.safetensors`: A set of `torch.nn.Module` parameters, saved in [Hugging Face Safetensors](https://huggingface.co/docs/safetensors/index) format. -- `train_config.json`: A consolidated configuration containing all parameter userd for training. The policy configuration should match `config.json` exactly. Thisis useful for anyone who wants to evaluate your policy or for reproducibility. +- `train_config.json`: A consolidated configuration containing all parameters used for training. The policy configuration should match `config.json` exactly. This is useful for anyone who wants to evaluate your policy or for reproducibility. To upload these to the hub, run the following: ```bash diff --git a/docs/source/assemble_so101.mdx b/docs/source/assemble_so101.mdx index 3aceb6b929..3150061bd0 100644 --- a/docs/source/assemble_so101.mdx +++ b/docs/source/assemble_so101.mdx @@ -194,7 +194,7 @@ Here is a video of the process:
### Clean Parts -Remove all support material from the 3D-printed parts, the easiest wat to do this is using a small screwdriver to get underneath the support material. +Remove all support material from the 3D-printed parts, the easiest way to do this is using a small screwdriver to get underneath the support material. ### Joint 1 diff --git a/docs/source/getting_started_real_world_robot.mdx b/docs/source/getting_started_real_world_robot.mdx index f580b9fe02..dbc16d8a2b 100644 --- a/docs/source/getting_started_real_world_robot.mdx +++ b/docs/source/getting_started_real_world_robot.mdx @@ -152,7 +152,7 @@ If everything is set up correctly, you can proceed with the rest of the tutorial ## Teleoperate with cameras -We can now teleoperate again while at the same time visualzing the camera's and joint positions with `rerun`. +We can now teleoperate again while at the same time visualizing the camera's and joint positions with `rerun`. ```bash python lerobot/scripts/control_robot.py \ @@ -165,7 +165,7 @@ python lerobot/scripts/control_robot.py \ Once you're familiar with teleoperation, you can record your first dataset with SO-101. -We use the Hugging Face hub features for uploading your dataset. If you haven't previously used the Hub, make sure you've can login via the cli using a write-access token, this token can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens). +We use the Hugging Face hub features for uploading your dataset. If you haven't previously used the Hub, make sure you can login via the cli using a write-access token, this token can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens). Add your token to the cli by running this command: ```bash @@ -318,7 +318,7 @@ python lerobot/scripts/train.py \ Let's explain the command: 1. We provided the dataset as argument with `--dataset.repo_id=${HF_USER}/so101_test`. -2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](../lerobot/common/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor sates, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset. +2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](../lerobot/common/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor states, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset. 4. We provided `policy.device=cuda` since we are training on a Nvidia GPU, but you could use `policy.device=mps` to train on Apple silicon. 5. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`. diff --git a/examples/10_use_so100.md b/examples/10_use_so100.md index 9385c7f575..7f6430d7db 100644 --- a/examples/10_use_so100.md +++ b/examples/10_use_so100.md @@ -578,7 +578,7 @@ python lerobot/scripts/train.py \ Let's explain it: 1. We provided the dataset as argument with `--dataset.repo_id=${HF_USER}/so100_test`. -2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](../lerobot/common/policies/act/configuration_act.py). 
Importantly, this policy will automatically adapt to the number of motor sates, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset. +2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](../lerobot/common/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor states, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset. 4. We provided `policy.device=cuda` since we are training on a Nvidia GPU, but you could use `policy.device=mps` to train on Apple silicon. 5. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`. diff --git a/examples/11_use_lekiwi.md b/examples/11_use_lekiwi.md index 1be7cbc4ae..4c15dcd102 100644 --- a/examples/11_use_lekiwi.md +++ b/examples/11_use_lekiwi.md @@ -134,7 +134,7 @@ First we will assemble the two SO100 arms. One to attach to the mobile base and ## SO100 Arms ### Configure motors -The instructions for configuring the motors can be found [Here](https://github.com/huggingface/lerobot/blob/main/examples/10_use_so100.md#c-configure-the-motors) in step C of the SO100 tutorial. Besides the ID's for the arm motors we also need to set the motor ID's for the mobile base. These needs to be in a specific order to work. Below an image of the motor ID's and motor mounting positions for the mobile base. Note that we only use one Motor Control board on LeKiwi. This means the motor ID's for the wheels are 7, 8 and 9. +The instructions for configuring the motors can be found [Here](https://github.com/huggingface/lerobot/blob/main/examples/10_use_so100.md#c-configure-the-motors) in step C of the SO100 tutorial. Besides the ID's for the arm motors we also need to set the motor ID's for the mobile base. These need to be in a specific order to work. Below an image of the motor ID's and motor mounting positions for the mobile base. Note that we only use one Motor Control board on LeKiwi. This means the motor ID's for the wheels are 7, 8 and 9. Motor ID's for mobile robot @@ -567,7 +567,7 @@ python lerobot/scripts/train.py \ Let's explain it: 1. We provided the dataset as argument with `--dataset.repo_id=${HF_USER}/lekiwi_test`. -2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](../lerobot/common/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor sates, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset. +2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](../lerobot/common/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor states, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset. 4. We provided `policy.device=cuda` since we are training on a Nvidia GPU, but you could use `policy.device=mps` to train on Apple silicon. 5. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`. 
diff --git a/examples/11_use_moss.md b/examples/11_use_moss.md index 1b6f23b9ab..b2e1857378 100644 --- a/examples/11_use_moss.md +++ b/examples/11_use_moss.md @@ -44,7 +44,7 @@ cd ~/lerobot && pip install -e ".[feetech]" ## Configure the motors -Follow steps 1 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic) which illustrates the use of our scripts below. +Follow step 1 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic) which illustrates the use of our scripts below. **Find USB ports associated to your arms** To find the correct ports for each arm, run the utility script twice: @@ -164,7 +164,7 @@ Try to avoid rotating the motor while doing so to keep position 2048 set during ## Assemble the arms -Follow step 4 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic). The first arm should take a bit more than 1 hour to assemble, but once you get use to it, you can do it under 1 hour for the second arm. +Follow step 4 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic). The first arm should take a bit more than 1 hour to assemble, but once you get used to it, you can do it under 1 hour for the second arm. ## Calibrate @@ -301,7 +301,7 @@ python lerobot/scripts/train.py \ Let's explain it: 1. We provided the dataset as argument with `--dataset.repo_id=${HF_USER}/moss_test`. -2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](../lerobot/common/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor sates, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset. +2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](../lerobot/common/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor states, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset. 4. We provided `policy.device=cuda` since we are training on a Nvidia GPU, but you could use `policy.device=mps` to train on Apple silicon. 5. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`. diff --git a/examples/12_use_so101.md b/examples/12_use_so101.md index 5b8797b7c0..0886d988dc 100644 --- a/examples/12_use_so101.md +++ b/examples/12_use_so101.md @@ -428,7 +428,7 @@ camera_01_frame_000047.png Note: Some cameras may take a few seconds to warm up, and the first frame might be black or green. -Now that you have the camera indexes, you should change then in the config. You can also change the fps, width or height of the camera. +Now that you have the camera indexes, you should change them in the config. You can also change the fps, width or height of the camera. The camera config is defined per robot, can be found here [`RobotConfig`](https://github.com/huggingface/lerobot/blob/main/lerobot/common/robot_devices/robots/configs.py) and looks like this: ```python @@ -515,7 +515,7 @@ If you have an additional camera you can add a wrist camera to the SO101. There ## Teleoperate with cameras -We can now teleoperate again while at the same time visualzing the camera's and joint positions with `rerun`. +We can now teleoperate again while at the same time visualizing the camera's and joint positions with `rerun`. 
```bash python lerobot/scripts/control_robot.py \ @@ -528,7 +528,7 @@ python lerobot/scripts/control_robot.py \ Once you're familiar with teleoperation, you can record your first dataset with SO-100. -We use the Hugging Face hub features for uploading your dataset. If you haven't previously used the Hub, make sure you've can login via the cli using a write-access token, this token can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens). +We use the Hugging Face hub features for uploading your dataset. If you haven't previously used the Hub, make sure you can login via the cli using a write-access token, this token can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens). Add your token to the cli by running this command: ```bash diff --git a/examples/2_evaluate_pretrained_policy.py b/examples/2_evaluate_pretrained_policy.py index 6860695894..4e6154c2e5 100644 --- a/examples/2_evaluate_pretrained_policy.py +++ b/examples/2_evaluate_pretrained_policy.py @@ -13,7 +13,7 @@ # limitations under the License. """ -This scripts demonstrates how to evaluate a pretrained policy from the HuggingFace Hub or from your local +This script demonstrates how to evaluate a pretrained policy from the HuggingFace Hub or from your local training outputs directory. In the latter case, you might want to run examples/3_train_policy.py first. It requires the installation of the 'gym_pusht' simulation environment. Install it by running: @@ -119,7 +119,7 @@ rewards.append(reward) frames.append(env.render()) - # The rollout is considered done when the success state is reach (i.e. terminated is True), + # The rollout is considered done when the success state is reached (i.e. terminated is True), # or the maximum number of iterations is reached (i.e. truncated is True) done = terminated | truncated | done step += 1 diff --git a/examples/3_train_policy.py b/examples/3_train_policy.py index 6c3af54ead..f9c251a02f 100644 --- a/examples/3_train_policy.py +++ b/examples/3_train_policy.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""This scripts demonstrates how to train Diffusion Policy on the PushT environment. +"""This script demonstrates how to train Diffusion Policy on the PushT environment. Once you have trained a model with this script, you can try to evaluate it on examples/2_evaluate_pretrained_policy.py diff --git a/examples/4_train_policy_with_script.md b/examples/4_train_policy_with_script.md index 0c11afe982..cb4cc6268e 100644 --- a/examples/4_train_policy_with_script.md +++ b/examples/4_train_policy_with_script.md @@ -1,5 +1,5 @@ This tutorial will explain the training script, how to use it, and particularly how to configure everything needed for the training run. -> **Note:** The following assume you're running these commands on a machine equipped with a cuda GPU. If you don't have one (or if you're using a Mac), you can add `--policy.device=cpu` (`--policy.device=mps` respectively). However, be advised that the code executes much slower on cpu. +> **Note:** The following assumes you're running these commands on a machine equipped with a cuda GPU. If you don't have one (or if you're using a Mac), you can add `--policy.device=cpu` (`--policy.device=mps` respectively). However, be advised that the code executes much slower on cpu. 
## The training script @@ -23,7 +23,7 @@ def train(cfg: TrainPipelineConfig): You can inspect the `TrainPipelineConfig` defined in [`lerobot/configs/train.py`](../lerobot/configs/train.py) (which is heavily commented and meant to be a reference to understand any option) -When running the script, inputs for the command line are parsed thanks to the `@parser.wrap()` decorator and an instance of this class is automatically generated. Under the hood, this is done with [Draccus](https://github.com/dlwh/draccus) which is a tool dedicated for this purpose. If you're familiar with Hydra, Draccus can similarly load configurations from config files (.json, .yaml) and also override their values through command line inputs. Unlike Hydra, these configurations are pre-defined in the code through dataclasses rather than being defined entirely in config files. This allows for more rigorous serialization/deserialization, typing, and to manipulate configuration as objects directly in the code and not as dictionaries or namespaces (which enables nice features in an IDE such as autocomplete, jump-to-def, etc.) +When running the script, inputs for the command line are parsed thanks to the `@parser.wrap()` decorator and an instance of this class is automatically generated. Under the hood, this is done with [Draccus](https://github.com/dlwh/draccus) which is a tool dedicated to this purpose. If you're familiar with Hydra, Draccus can similarly load configurations from config files (.json, .yaml) and also override their values through command line inputs. Unlike Hydra, these configurations are pre-defined in the code through dataclasses rather than being defined entirely in config files. This allows for more rigorous serialization/deserialization, typing, and to manipulate configuration as objects directly in the code and not as dictionaries or namespaces (which enables nice features in an IDE such as autocomplete, jump-to-def, etc.) Let's have a look at a simplified example. Amongst other attributes, the training config has the following attributes: ```python @@ -43,7 +43,7 @@ class DatasetConfig: ``` This creates a hierarchical relationship where, for example assuming we have a `cfg` instance of `TrainPipelineConfig`, we can access the `repo_id` value with `cfg.dataset.repo_id`. -From the command line, we can specify this value with using a very similar syntax `--dataset.repo_id=repo/id`. +From the command line, we can specify this value by using a very similar syntax `--dataset.repo_id=repo/id`. By default, every field takes its default value specified in the dataclass. If a field doesn't have a default value, it needs to be specified either from the command line or from a config file – which path is also given in the command line (more in this below). In the example above, the `dataset` field doesn't have a default value which means it must be specified. @@ -135,7 +135,7 @@ will start a training run with the same configuration used for training [lerobot ## Resume training -Being able to resume a training run is important in case it crashed or aborted for any reason. We'll demonstrate how to that here. +Being able to resume a training run is important in case it crashed or aborted for any reason. We'll demonstrate how to do that here. 
Let's reuse the command from the previous run and add a few more options: ```bash diff --git a/examples/7_get_started_with_real_robot.md b/examples/7_get_started_with_real_robot.md index a31524bfb9..9a4db5257d 100644 --- a/examples/7_get_started_with_real_robot.md +++ b/examples/7_get_started_with_real_robot.md @@ -377,7 +377,7 @@ robot = ManipulatorRobot(robot_config) The `KochRobotConfig` is used to set the associated settings and calibration process. For instance, we activate the torque of the gripper of the leader Koch v1.1 arm and position it at a 40 degree angle to use it as a trigger. -For the [Aloha bimanual robot](https://aloha-2.github.io), we would use `AlohaRobotConfig` to set different settings such as a secondary ID for shadow joints (shoulder, elbow). Specific to Aloha, LeRobot comes with default calibration files stored in in `.cache/calibration/aloha_default`. Assuming the motors have been properly assembled, no manual calibration step is expected for Aloha. +For the [Aloha bimanual robot](https://aloha-2.github.io), we would use `AlohaRobotConfig` to set different settings such as a secondary ID for shadow joints (shoulder, elbow). Specific to Aloha, LeRobot comes with default calibration files stored in `.cache/calibration/aloha_default`. Assuming the motors have been properly assembled, no manual calibration step is expected for Aloha. **Calibrate and Connect the ManipulatorRobot** @@ -399,7 +399,7 @@ And here are the corresponding positions for the leader arm: You can watch a [video tutorial of the calibration procedure](https://youtu.be/8drnU9uRY24) for more details. -During calibration, we count the number of full 360-degree rotations your motors have made since they were first used. That's why we ask yo to move to this arbitrary "zero" position. We don't actually "set" the zero position, so you don't need to be accurate. After calculating these "offsets" to shift the motor values around 0, we need to assess the rotation direction of each motor, which might differ. That's why we ask you to rotate all motors to roughly 90 degrees, to measure if the values changed negatively or positively. +During calibration, we count the number of full 360-degree rotations your motors have made since they were first used. That's why we ask you to move to this arbitrary "zero" position. We don't actually "set" the zero position, so you don't need to be accurate. After calculating these "offsets" to shift the motor values around 0, we need to assess the rotation direction of each motor, which might differ. That's why we ask you to rotate all motors to roughly 90 degrees, to measure if the values changed negatively or positively. Finally, the rest position ensures that the follower and leader arms are roughly aligned after calibration, preventing sudden movements that could damage the motors when starting teleoperation. @@ -622,7 +622,7 @@ camera_01_frame_000047.png Note: Some cameras may take a few seconds to warm up, and the first frame might be black or green. 
-Finally, run this code to instantiate and connectyour camera: +Finally, run this code to instantiate and connect your camera: ```python from lerobot.common.robot_devices.cameras.configs import OpenCVCameraConfig from lerobot.common.robot_devices.cameras.opencv import OpenCVCamera diff --git a/examples/8_use_stretch.md b/examples/8_use_stretch.md index a7a7dde17f..982e725719 100644 --- a/examples/8_use_stretch.md +++ b/examples/8_use_stretch.md @@ -99,7 +99,7 @@ This is equivalent to running `stretch_robot_home.py` > **Note:** If you run any of the LeRobot scripts below and Stretch is not properly homed, it will automatically home/calibrate first. **Teleoperate** -Before trying teleoperation, you need activate the gamepad controller by pressing the middle button. For more info, see Stretch's [doc](https://docs.hello-robot.com/0.3/getting_started/hello_robot/#gamepad-teleoperation). +Before trying teleoperation, you need to activate the gamepad controller by pressing the middle button. For more info, see Stretch's [doc](https://docs.hello-robot.com/0.3/getting_started/hello_robot/#gamepad-teleoperation). Now try out teleoperation (see above documentation to learn about the gamepad controls): diff --git a/examples/9_use_aloha.md b/examples/9_use_aloha.md index 77cff16115..be2a323b63 100644 --- a/examples/9_use_aloha.md +++ b/examples/9_use_aloha.md @@ -142,7 +142,7 @@ python lerobot/scripts/train.py \ Let's explain it: 1. We provided the dataset as argument with `--dataset.repo_id=${HF_USER}/aloha_test`. -2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](../lerobot/common/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor sates, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset. +2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](../lerobot/common/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor states, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset. 4. We provided `policy.device=cuda` since we are training on a Nvidia GPU, but you could use `policy.device=mps` to train on Apple silicon. 5. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`. 
diff --git a/examples/advanced/2_calculate_validation_loss.py b/examples/advanced/2_calculate_validation_loss.py index 47b4dd028b..aac8e2e4ec 100644 --- a/examples/advanced/2_calculate_validation_loss.py +++ b/examples/advanced/2_calculate_validation_loss.py @@ -66,7 +66,7 @@ def main(): print(f"Number of episodes in full dataset: {total_episodes}") print(f"Number of episodes in training dataset (90% subset): {len(train_episodes)}") print(f"Number of episodes in validation dataset (10% subset): {len(val_episodes)}") - # - Load train an val datasets + # - Load train and val datasets train_dataset = LeRobotDataset( "lerobot/pusht", episodes=train_episodes, delta_timestamps=delta_timestamps ) diff --git a/lerobot/common/datasets/factory.py b/lerobot/common/datasets/factory.py index 38c01b42f8..88d3f767fc 100644 --- a/lerobot/common/datasets/factory.py +++ b/lerobot/common/datasets/factory.py @@ -49,7 +49,7 @@ def resolve_delta_timestamps( "observation.state": [-0.04, -0.02, 0] "observation.action": [-0.02, 0, 0.02] } - returns `None` if the the resulting dict is empty. + returns `None` if the resulting dict is empty. """ delta_timestamps = {} for key in ds_meta.features: diff --git a/lerobot/common/datasets/transforms.py b/lerobot/common/datasets/transforms.py index 720c939b8f..3ac1d57715 100644 --- a/lerobot/common/datasets/transforms.py +++ b/lerobot/common/datasets/transforms.py @@ -128,7 +128,7 @@ def _check_input(self, sharpness): raise TypeError(f"{sharpness=} should be a single number or a sequence with length 2.") if not 0.0 <= sharpness[0] <= sharpness[1]: - raise ValueError(f"sharpnesss values should be between (0., inf), but got {sharpness}.") + raise ValueError(f"sharpness values should be between (0., inf), but got {sharpness}.") return float(sharpness[0]), float(sharpness[1]) diff --git a/lerobot/scripts/eval.py b/lerobot/scripts/eval.py index 9790f8b317..58275f666c 100644 --- a/lerobot/scripts/eval.py +++ b/lerobot/scripts/eval.py @@ -94,8 +94,8 @@ def rollout( data will probably need to be discarded (for environments that aren't the first one to be done). The return dictionary contains: - (optional) "observation": A a dictionary of (batch, sequence + 1, *) tensors mapped to observation - keys. NOTE the that this has an extra sequence element relative to the other keys in the + (optional) "observation": A dictionary of (batch, sequence + 1, *) tensors mapped to observation + keys. NOTE that this has an extra sequence element relative to the other keys in the dictionary. This is because an extra observation is included for after the environment is terminated or truncated. "action": A (batch, sequence, action_dim) tensor of actions applied based on the observations (not From 8bcfe4e338856cb7e9e501554d9b78228c7aa643 Mon Sep 17 00:00:00 2001 From: Pepijn <138571049+pkooij@users.noreply.github.com> Date: Fri, 9 May 2025 11:00:25 +0200 Subject: [PATCH 12/88] docs: minor corrections and clean-up (#1089) --- docs/source/assemble_so101.mdx | 12 +++++++----- docs/source/getting_started_real_world_robot.mdx | 4 ++-- examples/12_use_so101.md | 8 +++++--- 3 files changed, 14 insertions(+), 10 deletions(-) diff --git a/docs/source/assemble_so101.mdx b/docs/source/assemble_so101.mdx index 3150061bd0..de280a3923 100644 --- a/docs/source/assemble_so101.mdx +++ b/docs/source/assemble_so101.mdx @@ -96,8 +96,8 @@ Reconnect the usb cable. 
#### Update config file Now that you have your ports, update the **port** default values of [`SO101RobotConfig`](https://github.com/huggingface/lerobot/blob/main/lerobot/common/robot_devices/robots/configs.py). -You will find something like, update the `port` values with your actual motor ports: -```python +You will find a class called `so101` where you can update the `port` values with your actual motor ports: +```diff @RobotConfig.register_subclass("so101") @dataclass class So101RobotConfig(ManipulatorRobotConfig): @@ -110,7 +110,8 @@ class So101RobotConfig(ManipulatorRobotConfig): leader_arms: dict[str, MotorsBusConfig] = field( default_factory=lambda: { "main": FeetechMotorsBusConfig( - port="/dev/tty.usbmodem58760431091", <-- UPDATE HERE +- port="/dev/tty.usbmodem58760431091", ++ port="{ADD YOUR LEADER PORT}", motors={ # name: (index, model) "shoulder_pan": [1, "sts3215"], @@ -127,7 +128,8 @@ class So101RobotConfig(ManipulatorRobotConfig): follower_arms: dict[str, MotorsBusConfig] = field( default_factory=lambda: { "main": FeetechMotorsBusConfig( - port="/dev/tty.usbmodem585A0076891", <-- UPDATE HERE +- port="/dev/tty.usbmodem585A0076891", ++ port="{ADD YOUR FOLLOWER PORT}", motors={ # name: (index, model) "shoulder_pan": [1, "sts3215"], @@ -297,7 +299,7 @@ Remove all support material from the 3D-printed parts, the easiest way to do thi ##### Wiring - Attach the motor controller on the back. -- Then insert all wires, use the wire guides everywhere to make sure the wires don't unplug themself and stay in place. +- Then insert all wires, use the wire guides everywhere to make sure the wires don't unplug themselves and stay in place.
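Stepping back to the "Update config file" step above: a quick way to confirm the edited port defaults took effect is to instantiate the config and print them. This is a small sanity check assuming the `So101RobotConfig` class and fields shown in the diff (adjust the import path to your checkout):

```python
# Prints the port defaults currently baked into So101RobotConfig (names taken from the diff above).
from lerobot.common.robot_devices.robots.configs import So101RobotConfig

cfg = So101RobotConfig()
print("leader port:  ", cfg.leader_arms["main"].port)
print("follower port:", cfg.follower_arms["main"].port)
```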

[image and link markup omitted]
Build Your Own SO-101 Robot!
@@ -48,11 +48,11 @@
Train it in minutes with a few simple moves on your laptop.
Then sit back and watch your creation act autonomously! 🤯
See the full SO-101 tutorial here.
Want to take it to the next level? Make your SO-101 mobile by building LeKiwi!
Check out the LeKiwi tutorial and bring your robot to life on wheels.
LeKiwi mobile robot From 3b32731624d9e8954f7f94af59e83e4ab5bac8e4 Mon Sep 17 00:00:00 2001 From: Sarunas Kalade Date: Tue, 10 Jun 2025 04:42:54 -0600 Subject: [PATCH 36/88] update KochFollower.get_observation() so it returns same observation structure as SO101 (#1248) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- lerobot/common/robots/koch_follower/koch_follower.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/lerobot/common/robots/koch_follower/koch_follower.py b/lerobot/common/robots/koch_follower/koch_follower.py index 9ba506b4bd..64ece25f2b 100644 --- a/lerobot/common/robots/koch_follower/koch_follower.py +++ b/lerobot/common/robots/koch_follower/koch_follower.py @@ -20,7 +20,6 @@ from typing import Any from lerobot.common.cameras.utils import make_cameras_from_configs -from lerobot.common.constants import OBS_STATE from lerobot.common.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError from lerobot.common.motors import Motor, MotorCalibration, MotorNormMode from lerobot.common.motors.dynamixel import ( @@ -175,11 +174,9 @@ def get_observation(self) -> dict[str, Any]: if not self.is_connected: raise DeviceNotConnectedError(f"{self} is not connected.") - obs_dict = {} - # Read arm position start = time.perf_counter() - obs_dict[OBS_STATE] = self.bus.sync_read("Present_Position") + obs_dict = self.bus.sync_read("Present_Position") obs_dict = {f"{motor}.pos": val for motor, val in obs_dict.items()} dt_ms = (time.perf_counter() - start) * 1e3 logger.debug(f"{self} read state: {dt_ms:.1f}ms") From 79b928e91811b83aa7b13d3edffb6810624b221a Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 10 Jun 2025 18:04:09 +0200 Subject: [PATCH 37/88] [pre-commit.ci] pre-commit autoupdate (#1185) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Simon Alibert <75076266+aliberts@users.noreply.github.com> --- .pre-commit-config.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c2ee3b20c5..e1f971d39b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -37,7 +37,7 @@ repos: - id: trailing-whitespace - repo: https://github.com/adhtruong/mirrors-typos - rev: v1.32.0 + rev: v1.33.1 hooks: - id: typos args: [--force-exclude] @@ -48,7 +48,7 @@ repos: - id: pyupgrade - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.11.11 + rev: v0.11.13 hooks: - id: ruff args: [--fix] @@ -57,12 +57,12 @@ repos: ##### Security ##### - repo: https://github.com/gitleaks/gitleaks - rev: v8.26.0 + rev: v8.27.2 hooks: - id: gitleaks - repo: https://github.com/woodruffw/zizmor-pre-commit - rev: v1.8.0 + rev: v1.9.0 hooks: - id: zizmor From 36908fcfa488681f00400a4b32b83910d3f6aef1 Mon Sep 17 00:00:00 2001 From: koenvanwijk Date: Tue, 10 Jun 2025 18:36:02 +0200 Subject: [PATCH 38/88] Proposal for fix for enter_pressed on Windows (#1230) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Simon Alibert <75076266+aliberts@users.noreply.github.com> --- lerobot/common/utils/utils.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/lerobot/common/utils/utils.py b/lerobot/common/utils/utils.py index 756ad9f0a9..08e9a3c06b 100644 --- a/lerobot/common/utils/utils.py +++ b/lerobot/common/utils/utils.py @@ -233,7 +233,15 @@ def 
is_valid_numpy_dtype_string(dtype_str: str) -> bool: def enter_pressed() -> bool: - return select.select([sys.stdin], [], [], 0)[0] and sys.stdin.readline().strip() == "" + if platform.system() == "Windows": + import msvcrt + + if msvcrt.kbhit(): + key = msvcrt.getch() + return key in (b"\r", b"\n") # enter key + return False + else: + return select.select([sys.stdin], [], [], 0)[0] and sys.stdin.readline().strip() == "" def move_cursor_up(lines): From 235d8b3007df9cbe504e0d478e1974780d7657f5 Mon Sep 17 00:00:00 2001 From: Yushun Xiang <73413365+YushunXiang@users.noreply.github.com> Date: Wed, 11 Jun 2025 00:46:41 +0800 Subject: [PATCH 39/88] fix: update pi0 dependency version constraint (#1247) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- lerobot/common/policies/pi0/paligemma_with_expert.py | 6 +++++- lerobot/common/policies/pi0fast/modeling_pi0fast.py | 6 +++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/lerobot/common/policies/pi0/paligemma_with_expert.py b/lerobot/common/policies/pi0/paligemma_with_expert.py index 76e2ce6005..49c844c7bf 100644 --- a/lerobot/common/policies/pi0/paligemma_with_expert.py +++ b/lerobot/common/policies/pi0/paligemma_with_expert.py @@ -216,7 +216,11 @@ def to_bfloat16_like_physical_intelligence(self): param.data = param.data.to(dtype=torch.bfloat16) def embed_image(self, image: torch.Tensor): - return self.paligemma.get_image_features(image) + # Handle different transformers versions + if hasattr(self.paligemma, "get_image_features"): + return self.paligemma.get_image_features(image) + else: + return self.paligemma.model.get_image_features(image) def embed_language_tokens(self, tokens: torch.Tensor): return self.paligemma.language_model.model.embed_tokens(tokens) diff --git a/lerobot/common/policies/pi0fast/modeling_pi0fast.py b/lerobot/common/policies/pi0fast/modeling_pi0fast.py index 4996b1a083..a2df40f26f 100644 --- a/lerobot/common/policies/pi0fast/modeling_pi0fast.py +++ b/lerobot/common/policies/pi0fast/modeling_pi0fast.py @@ -878,7 +878,11 @@ def generate_actions(self, batch: dict[str, Tensor]): return actions def embed_image(self, image: torch.Tensor): - return self.pi0_paligemma.get_image_features(image) + # Handle different transformers versions + if hasattr(self.pi0_paligemma, "get_image_features"): + return self.pi0_paligemma.get_image_features(image) + else: + return self.pi0_paligemma.model.get_image_features(image) def embed_inputs( self, From 3305d2ed4835c517e91e97639f98393901931c19 Mon Sep 17 00:00:00 2001 From: Pepijn <138571049+pkooij@users.noreply.github.com> Date: Wed, 11 Jun 2025 14:21:30 +0200 Subject: [PATCH 40/88] Match motor names with ids lekiwi (#1261) --- examples/lekiwi/record.py | 4 ++-- lerobot/common/robots/lekiwi/config_lekiwi.py | 22 ++++++++++++------- lerobot/common/robots/lekiwi/lekiwi.py | 19 ++++++---------- lerobot/common/robots/lekiwi/lekiwi_client.py | 16 +++++++------- lerobot/common/robots/lekiwi/lekiwi_host.py | 8 +++---- 5 files changed, 34 insertions(+), 35 deletions(-) diff --git a/examples/lekiwi/record.py b/examples/lekiwi/record.py index 4f56213d7b..405a41bd38 100644 --- a/examples/lekiwi/record.py +++ b/examples/lekiwi/record.py @@ -23,7 +23,7 @@ dataset_features = {**action_features, **obs_features} dataset = LeRobotDataset.create( - repo_id="user/lekiwi" + str(int(time.time())), + repo_id="pepijn223/lekiwi" + str(int(time.time())), fps=10, features=dataset_features, robot_type=robot.name, @@ -36,7 +36,7 @@ if not robot.is_connected or 
not leader_arm.is_connected or not keyboard.is_connected: exit() -print("Starting LeKiwi teleoperation") +print("Starting LeKiwi recording") i = 0 while i < NB_CYCLES_CLIENT_CONNECTION: arm_action = leader_arm.get_action() diff --git a/lerobot/common/robots/lekiwi/config_lekiwi.py b/lerobot/common/robots/lekiwi/config_lekiwi.py index 9876ada210..4bb5e4dc36 100644 --- a/lerobot/common/robots/lekiwi/config_lekiwi.py +++ b/lerobot/common/robots/lekiwi/config_lekiwi.py @@ -20,6 +20,17 @@ from ..config import RobotConfig +def lekiwi_cameras_config() -> dict[str, CameraConfig]: + return { + "front": OpenCVCameraConfig( + index_or_path="/dev/video0", fps=30, width=640, height=480, rotation=Cv2Rotation.ROTATE_180 + ), + "wrist": OpenCVCameraConfig( + index_or_path="/dev/video2", fps=30, width=480, height=640, rotation=Cv2Rotation.ROTATE_90 + ), + } + + @RobotConfig.register_subclass("lekiwi") @dataclass class LeKiwiConfig(RobotConfig): @@ -32,14 +43,7 @@ class LeKiwiConfig(RobotConfig): # the number of motors in your follower arms. max_relative_target: int | None = None - cameras: dict[str, CameraConfig] = field( - default_factory=lambda: { - "front": OpenCVCameraConfig(index_or_path="/dev/video0", fps=30, width=640, height=480), - "wrist": OpenCVCameraConfig( - index_or_path="/dev/video2", fps=30, width=640, height=480, rotation=Cv2Rotation.ROTATE_180 - ), - } - ) + cameras: dict[str, CameraConfig] = field(default_factory=lekiwi_cameras_config) # Set to `True` for backward compatibility with previous policies/dataset use_degrees: bool = False @@ -86,5 +90,7 @@ class LeKiwiClientConfig(RobotConfig): } ) + cameras: dict[str, CameraConfig] = field(default_factory=lekiwi_cameras_config) + polling_timeout_ms: int = 15 connect_timeout_s: int = 5 diff --git a/lerobot/common/robots/lekiwi/lekiwi.py b/lerobot/common/robots/lekiwi/lekiwi.py index a1c2ffa14b..f6a9b8bf13 100644 --- a/lerobot/common/robots/lekiwi/lekiwi.py +++ b/lerobot/common/robots/lekiwi/lekiwi.py @@ -23,7 +23,6 @@ import numpy as np from lerobot.common.cameras.utils import make_cameras_from_configs -from lerobot.common.constants import OBS_IMAGES, OBS_STATE from lerobot.common.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError from lerobot.common.motors import Motor, MotorCalibration, MotorNormMode from lerobot.common.motors.feetech import ( @@ -65,8 +64,8 @@ def __init__(self, config: LeKiwiConfig): "arm_gripper": Motor(6, "sts3215", MotorNormMode.RANGE_0_100), # base "base_left_wheel": Motor(7, "sts3215", MotorNormMode.RANGE_M100_100), - "base_right_wheel": Motor(8, "sts3215", MotorNormMode.RANGE_M100_100), - "base_back_wheel": Motor(9, "sts3215", MotorNormMode.RANGE_M100_100), + "base_back_wheel": Motor(8, "sts3215", MotorNormMode.RANGE_M100_100), + "base_right_wheel": Motor(9, "sts3215", MotorNormMode.RANGE_M100_100), }, calibration=self.calibration, ) @@ -249,7 +248,7 @@ def _body_to_wheel_raw( velocity_vector = np.array([x, y, theta_rad]) # Define the wheel mounting angles with a -90° offset. - angles = np.radians(np.array([240, 120, 0]) - 90) + angles = np.radians(np.array([240, 0, 120]) - 90) # Build the kinematic matrix: each row maps body velocities to a wheel’s linear speed. # The third column (base_radius) accounts for the effect of rotation. m = np.array([[np.cos(a), np.sin(a), base_radius] for a in angles]) @@ -295,10 +294,7 @@ def _wheel_raw_to_body( base_radius : Distance from the robot center to each wheel (meters). 
Returns: - A dict (x_cmd, y_cmd, theta_cmd) where: - OBS_STATE.x_cmd : Linear velocity in x (m/s). - OBS_STATE.y_cmd : Linear velocity in y (m/s). - OBS_STATE.theta_cmd : Rotational velocity in deg/s. + A dict (x.vel, y.vel, theta.vel) all in m/s """ # Convert each raw command back to an angular speed in deg/s. @@ -316,7 +312,7 @@ def _wheel_raw_to_body( wheel_linear_speeds = wheel_radps * wheel_radius # Define the wheel mounting angles with a -90° offset. - angles = np.radians(np.array([240, 120, 0]) - 90) + angles = np.radians(np.array([240, 0, 120]) - 90) m = np.array([[np.cos(a), np.sin(a), base_radius] for a in angles]) # Solve the inverse kinematics: body_velocity = M⁻¹ · wheel_linear_speeds. @@ -347,16 +343,15 @@ def get_observation(self) -> dict[str, Any]: arm_state = {f"{k}.pos": v for k, v in arm_pos.items()} - flat_states = {**arm_state, **base_vel} + obs_dict = {**arm_state, **base_vel} - obs_dict = {f"{OBS_STATE}": flat_states} dt_ms = (time.perf_counter() - start) * 1e3 logger.debug(f"{self} read state: {dt_ms:.1f}ms") # Capture images from cameras for cam_key, cam in self.cameras.items(): start = time.perf_counter() - obs_dict[f"{OBS_IMAGES}.{cam_key}"] = cam.async_read() + obs_dict[cam_key] = cam.async_read() dt_ms = (time.perf_counter() - start) * 1e3 logger.debug(f"{self} read {cam_key}: {dt_ms:.1f}ms") diff --git a/lerobot/common/robots/lekiwi/lekiwi_client.py b/lerobot/common/robots/lekiwi/lekiwi_client.py index 927ed49f53..f79b7f81a0 100644 --- a/lerobot/common/robots/lekiwi/lekiwi_client.py +++ b/lerobot/common/robots/lekiwi/lekiwi_client.py @@ -25,7 +25,6 @@ import torch import zmq -from lerobot.common.constants import OBS_IMAGES, OBS_STATE from lerobot.common.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError from ..robot import Robot @@ -92,11 +91,8 @@ def _state_order(self) -> tuple[str, ...]: return tuple(self._state_ft.keys()) @cached_property - def _cameras_ft(self) -> dict[str, tuple]: - return { - "front": (480, 640, 3), - "wrist": (640, 480, 3), - } + def _cameras_ft(self) -> dict[str, tuple[int, int, int]]: + return {name: (cfg.height, cfg.width, 3) for name, cfg in self.config.cameras.items()} @cached_property def observation_features(self) -> dict[str, type | tuple]: @@ -199,7 +195,7 @@ def _remote_state_from_obs( self, observation: Dict[str, Any] ) -> Tuple[Dict[str, np.ndarray], Dict[str, Any]]: """Extracts frames, and state from the parsed observation.""" - flat_state = observation[OBS_STATE] + flat_state = {key: value for key, value in observation.items() if key in self._state_ft} state_vec = np.array( [flat_state.get(k, 0.0) for k in self._state_order], @@ -207,7 +203,11 @@ def _remote_state_from_obs( ) # Decode images - image_observation = {k: v for k, v in observation.items() if k.startswith(OBS_IMAGES)} + image_observation = { + f"observation.images.{key}": value + for key, value in observation.items() + if key in self._cameras_ft + } current_frames: Dict[str, np.ndarray] = {} for cam_name, image_b64 in image_observation.items(): frame = self._decode_image_from_b64(image_b64) diff --git a/lerobot/common/robots/lekiwi/lekiwi_host.py b/lerobot/common/robots/lekiwi/lekiwi_host.py index 014c965b7f..1155cf71c2 100644 --- a/lerobot/common/robots/lekiwi/lekiwi_host.py +++ b/lerobot/common/robots/lekiwi/lekiwi_host.py @@ -22,8 +22,6 @@ import cv2 import zmq -from lerobot.common.constants import OBS_IMAGES - from .config_lekiwi import LeKiwiConfig, LeKiwiHostConfig from .lekiwi import LeKiwi @@ -95,12 +93,12 @@ def main(): # Encode ndarrays 
to base64 strings for cam_key, _ in robot.cameras.items(): ret, buffer = cv2.imencode( - ".jpg", last_observation[f"{OBS_IMAGES}.{cam_key}"], [int(cv2.IMWRITE_JPEG_QUALITY), 90] + ".jpg", last_observation[cam_key], [int(cv2.IMWRITE_JPEG_QUALITY), 90] ) if ret: - last_observation[f"{OBS_IMAGES}.{cam_key}"] = base64.b64encode(buffer).decode("utf-8") + last_observation[cam_key] = base64.b64encode(buffer).decode("utf-8") else: - last_observation[f"{OBS_IMAGES}.{cam_key}"] = "" + last_observation[cam_key] = "" # Send the observation to the remote agent try: From bf99e98848e000c8b2a6ee06ace0f11b5bd708f2 Mon Sep 17 00:00:00 2001 From: Dana Aubakirova <118912928+danaaubakirova@users.noreply.github.com> Date: Wed, 11 Jun 2025 16:56:55 +0200 Subject: [PATCH 41/88] fix issues: checkpoints keys mismatch and 'task' tokenisation in smolvla (#1256) Co-authored-by: danaaubakirova Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Simon Alibert <75076266+aliberts@users.noreply.github.com> Co-authored-by: Simon Alibert --- .../policies/smolvla/modeling_smolvla.py | 116 ++++++++++++++++++ pyproject.toml | 2 +- 2 files changed, 117 insertions(+), 1 deletion(-) diff --git a/lerobot/common/policies/smolvla/modeling_smolvla.py b/lerobot/common/policies/smolvla/modeling_smolvla.py index 6ac2d3e7ee..a6745880b4 100644 --- a/lerobot/common/policies/smolvla/modeling_smolvla.py +++ b/lerobot/common/policies/smolvla/modeling_smolvla.py @@ -53,8 +53,11 @@ """ import math +import os +import re from collections import deque +import safetensors import torch import torch.nn.functional as F # noqa: N812 from torch import Tensor, nn @@ -73,6 +76,98 @@ ) from lerobot.common.utils.utils import get_safe_dtype +# Matches ".soNNN", optionally followed by "-something", up to the "_buffer_" marker +_VARIANT_RE = re.compile(r"\.so\d+(?:-[\w]+)?_buffer_") + + +def canonicalise(k: str) -> str: + """ + Remove dataset-variant markers like '.so100-blue_' or '.so100_' from a + normalisation-buffer key. + """ + return _VARIANT_RE.sub(".buffer_", k) + + +def standardise_state_dict( + checkpoint: dict[str, torch.Tensor], ref_keys: set[str], *, verbose: bool = True +) -> tuple[dict[str, torch.Tensor], list[str]]: + """ + • Re-keys `checkpoint ` so that every entry matches the *reference* key set. + • If several variant keys collapse to the same canonical name we keep the + first one and log the collision. + • Returns the new dict + a list of entries that could not be matched. + """ + out, collisions, unmatched = {}, {}, [] + + for k, v in checkpoint.items(): + canon = canonicalise(k) + if canon in ref_keys: + if canon in out: # duplicate after collapsing + collisions.setdefault(canon, []).append(k) + else: + out[canon] = v + else: + unmatched.append(k) + + if verbose: + for canon, variants in collisions.items(): + print(f"[standardise_state_dict] '{canon}' ← {variants}") + if unmatched: + print(f"[standardise_state_dict] kept {len(unmatched)} unmatched keys") + + out.update({k: checkpoint[k] for k in unmatched}) + return out, unmatched + + +def rename_checkpoint_keys(checkpoint: dict, rename_str: str): + """ + Renames keys in a checkpoint dictionary based on the given rename string. + + Args: + checkpoint (dict): The checkpoint dictionary. + rename_str (str): A string specifying key mappings in the format "old1//new1,old2//new2". + + Returns: + dict: The modified checkpoint with renamed keys. 
+ """ + + rename_dict = dict(pair.split("//") for pair in rename_str.split(",")) + + new_checkpoint = {} + for k, v in checkpoint.items(): + for old_key, new_key in rename_dict.items(): + if old_key in k: + k = k.replace(old_key, new_key) + new_checkpoint[k] = v + return new_checkpoint + + +def load_smolvla( + model: torch.nn.Module, + filename: str | os.PathLike, + *, + device: str = "cpu", + checkpoint_keys_mapping: str = "", +) -> torch.nn.Module: + state_dict = safetensors.torch.load_file(filename, device=device) + + # Optional user-supplied renames (e.g. "model._orig_mod.//model.") + if checkpoint_keys_mapping and "//" in checkpoint_keys_mapping: + state_dict = rename_checkpoint_keys(state_dict, checkpoint_keys_mapping) + + state_dict, _ = standardise_state_dict(state_dict, set(model.state_dict().keys())) + + missing, unexpected = model.load_state_dict(state_dict) + + if missing or unexpected: + raise RuntimeError( + "SmolVLA %d missing / %d unexpected keys", + len(missing), + len(unexpected), + ) + + return model + def create_sinusoidal_pos_embedding( time: torch.tensor, dimension: int, min_period: float, max_period: float, device="cpu" @@ -264,6 +359,23 @@ def reset(self): ACTION: deque(maxlen=self.config.n_action_steps), } + # HACK(aliberts, danaaubakirova): we overwrite this classmethod here to fix smolVLA-specific issues + @classmethod + def _load_as_safetensor( + cls, + model: "SmolVLAPolicy", + model_file: str, + map_location: str, + strict: bool, + ): + safetensors.torch.load_model(model, model_file, strict=strict, device=map_location) + return load_smolvla( + model, + model_file, + device=map_location, + checkpoint_keys_mapping="model._orig_mod.//model.", + ) + def get_optim_params(self) -> dict: return self.parameters() @@ -387,10 +499,14 @@ def prepare_language(self, batch) -> tuple[Tensor, Tensor]: """Tokenize the text input""" device = batch[OBS_STATE].device tasks = batch["task"] + if isinstance(tasks, str): + tasks = [tasks] + if len(tasks) == 1: tasks = [tasks[0] for _ in range(batch[OBS_STATE].shape[0])] tasks = [task if task.endswith("\n") else f"{task}\n" for task in tasks] + tokenized_prompt = self.language_tokenizer.__call__( tasks, padding=self.config.pad_language_to, diff --git a/pyproject.toml b/pyproject.toml index 2ce5d049be..a99b1b16c4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -90,7 +90,7 @@ intelrealsense = [ "pyrealsense2-macosx>=2.54 ; sys_platform == 'darwin'", ] pi0 = ["transformers>=4.48.0"] -smolvla = ["transformers>=4.50.3", "num2words>=0.5.14", "accelerate>=1.7.0"] +smolvla = ["transformers>=4.50.3", "num2words>=0.5.14", "accelerate>=1.7.0", "safetensors>=0.4.3"] pusht = ["gym-pusht>=0.1.5 ; python_version < '4.0'"] stretch = [ "hello-robot-stretch-body>=0.7.27 ; python_version < '4.0' and sys_platform == 'linux'", From 50e6761801b3c074400ec9e106ee7596a4291f3f Mon Sep 17 00:00:00 2001 From: Steven Palma Date: Wed, 11 Jun 2025 23:16:37 +0200 Subject: [PATCH 42/88] fix(docs): update realsense documentation (#1268) --- docs/source/cameras.mdx | 6 +++--- lerobot/find_cameras.py | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/source/cameras.mdx b/docs/source/cameras.mdx index 5556660e9d..d8a49c1ee7 100644 --- a/docs/source/cameras.mdx +++ b/docs/source/cameras.mdx @@ -75,13 +75,13 @@ finally: ```python -from lerobot.common.cameras.intel.configuration_realsense import RealSenseCameraConfig -from lerobot.common.cameras.intel.camera_realsense import RealSenseCamera +from 
lerobot.common.cameras.realsense.configuration_realsense import RealSenseCameraConfig +from lerobot.common.cameras.realsense.camera_realsense import RealSenseCamera from lerobot.common.cameras.configs import ColorMode, Cv2Rotation # Create a `RealSenseCameraConfig` specifying your camera’s serial number and enabling depth. config = RealSenseCameraConfig( - serial_number="233522074606", + serial_number_or_name="233522074606", fps=15, width=640, height=480, diff --git a/lerobot/find_cameras.py b/lerobot/find_cameras.py index 3b5c4af3c0..34f4865b1d 100644 --- a/lerobot/find_cameras.py +++ b/lerobot/find_cameras.py @@ -170,7 +170,7 @@ def create_camera_instance(cam_meta: Dict[str, Any]) -> Dict[str, Any] | None: instance = OpenCVCamera(cv_config) elif cam_type == "RealSense": rs_config = RealSenseCameraConfig( - serial_number_or_name=int(cam_id), + serial_number_or_name=cam_id, color_mode=ColorMode.RGB, ) instance = RealSenseCamera(rs_config) @@ -283,7 +283,7 @@ def save_images_from_all_cameras( print("\nFinalizing image saving...") executor.shutdown(wait=True) cleanup_cameras(cameras_to_use) - logger.info(f"Image capture finished. Images saved to {output_dir}") + print(f"Image capture finished. Images saved to {output_dir}") if __name__ == "__main__": From 456359c61140db6c9449ef2b48dc9770db963d9e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Quentin=20Gallou=C3=A9dec?= <45557362+qgallouedec@users.noreply.github.com> Date: Thu, 12 Jun 2025 09:58:59 +0200 Subject: [PATCH 43/88] Use HF Papers (#1120) --- .../v2/batch_convert_dataset_v1_to_v2.py | 76 +++++++++---------- lerobot/common/policies/act/modeling_act.py | 8 +- .../diffusion/configuration_diffusion.py | 2 +- .../policies/diffusion/modeling_diffusion.py | 6 +- .../policies/pi0fast/modeling_pi0fast.py | 2 +- .../common/policies/tdmpc/modeling_tdmpc.py | 4 +- .../common/policies/vqbet/modeling_vqbet.py | 12 +-- lerobot/common/policies/vqbet/vqbet_utils.py | 6 +- 8 files changed, 58 insertions(+), 58 deletions(-) diff --git a/lerobot/common/datasets/v2/batch_convert_dataset_v1_to_v2.py b/lerobot/common/datasets/v2/batch_convert_dataset_v1_to_v2.py index 41dd33b62b..c31d2da0a5 100644 --- a/lerobot/common/datasets/v2/batch_convert_dataset_v1_to_v2.py +++ b/lerobot/common/datasets/v2/batch_convert_dataset_v1_to_v2.py @@ -36,7 +36,7 @@ "robot_config": AlohaRobotConfig(), "license": "mit", "url": "https://mobile-aloha.github.io/", - "paper": "https://arxiv.org/abs/2401.02117", + "paper": "https://huggingface.co/papers/2401.02117", "citation_bibtex": dedent(r""" @inproceedings{fu2024mobile, author = {Fu, Zipeng and Zhao, Tony Z. 
and Finn, Chelsea}, @@ -49,7 +49,7 @@ "robot_config": AlohaRobotConfig(), "license": "mit", "url": "https://tonyzhaozh.github.io/aloha/", - "paper": "https://arxiv.org/abs/2304.13705", + "paper": "https://huggingface.co/papers/2304.13705", "citation_bibtex": dedent(r""" @article{Zhao2023LearningFB, title={Learning Fine-Grained Bimanual Manipulation with Low-Cost Hardware}, @@ -57,13 +57,13 @@ journal={RSS}, year={2023}, volume={abs/2304.13705}, - url={https://arxiv.org/abs/2304.13705} + url={https://huggingface.co/papers/2304.13705} }""").lstrip(), } PUSHT_INFO = { "license": "mit", "url": "https://diffusion-policy.cs.columbia.edu/", - "paper": "https://arxiv.org/abs/2303.04137v5", + "paper": "https://huggingface.co/papers/2303.04137v5", "citation_bibtex": dedent(r""" @article{chi2024diffusionpolicy, author = {Cheng Chi and Zhenjia Xu and Siyuan Feng and Eric Cousineau and Yilun Du and Benjamin Burchfiel and Russ Tedrake and Shuran Song}, @@ -75,7 +75,7 @@ XARM_INFO = { "license": "mit", "url": "https://www.nicklashansen.com/td-mpc/", - "paper": "https://arxiv.org/abs/2203.04955", + "paper": "https://huggingface.co/papers/2203.04955", "citation_bibtex": dedent(r""" @inproceedings{Hansen2022tdmpc, title={Temporal Difference Learning for Model Predictive Control}, @@ -244,7 +244,7 @@ "tasks_col": "language_instruction", "license": "mit", "url": "https://ut-austin-rpl.github.io/BUDS-website/", - "paper": "https://arxiv.org/abs/2109.13841", + "paper": "https://huggingface.co/papers/2109.13841", "citation_bibtex": dedent(r""" @article{zhu2022bottom, title={Bottom-Up Skill Discovery From Unsegmented Demonstrations for Long-Horizon Robot Manipulation}, @@ -261,7 +261,7 @@ "tasks_col": "language_instruction", "license": "mit", "url": "https://ut-austin-rpl.github.io/sailor/", - "paper": "https://arxiv.org/abs/2210.11435", + "paper": "https://huggingface.co/papers/2210.11435", "citation_bibtex": dedent(r""" @inproceedings{nasiriany2022sailor, title={Learning and Retrieval from Prior Data for Skill-based Imitation Learning}, @@ -274,7 +274,7 @@ "tasks_col": "language_instruction", "license": "mit", "url": "https://ut-austin-rpl.github.io/sirius/", - "paper": "https://arxiv.org/abs/2211.08416", + "paper": "https://huggingface.co/papers/2211.08416", "citation_bibtex": dedent(r""" @inproceedings{liu2022robot, title = {Robot Learning on the Job: Human-in-the-Loop Autonomy and Learning During Deployment}, @@ -298,14 +298,14 @@ "tasks_col": "language_instruction", "license": "cc-by-4.0", "url": "https://sites.google.com/view/cablerouting/home", - "paper": "https://arxiv.org/abs/2307.08927", + "paper": "https://huggingface.co/papers/2307.08927", "citation_bibtex": dedent(r""" @article{luo2023multistage, author = {Jianlan Luo and Charles Xu and Xinyang Geng and Gilbert Feng and Kuan Fang and Liam Tan and Stefan Schaal and Sergey Levine}, title = {Multi-Stage Cable Routing through Hierarchical Imitation Learning}, journal = {arXiv pre-print}, year = {2023}, - url = {https://arxiv.org/abs/2307.08927}, + url = {https://huggingface.co/papers/2307.08927}, }""").lstrip(), }, "berkeley_fanuc_manipulation": { @@ -322,7 +322,7 @@ "berkeley_gnm_cory_hall": { "tasks_col": "language_instruction", "license": "mit", - "paper": "https://arxiv.org/abs/1709.10489", + "paper": "https://huggingface.co/papers/1709.10489", "citation_bibtex": dedent(r""" @inproceedings{kahn2018self, title={Self-supervised deep reinforcement learning with generalized computation graphs for robot navigation}, @@ -337,7 +337,7 @@ "tasks_col": 
"language_instruction", "license": "mit", "url": "https://sites.google.com/view/recon-robot", - "paper": "https://arxiv.org/abs/2104.05859", + "paper": "https://huggingface.co/papers/2104.05859", "citation_bibtex": dedent(r""" @inproceedings{shah2021rapid, title={Rapid Exploration for Open-World Navigation with Latent Goal Models}, @@ -351,7 +351,7 @@ "tasks_col": "language_instruction", "license": "mit", "url": "https://sites.google.com/view/SACSoN-review", - "paper": "https://arxiv.org/abs/2306.01874", + "paper": "https://huggingface.co/papers/2306.01874", "citation_bibtex": dedent(r""" @article{hirose2023sacson, title={SACSoN: Scalable Autonomous Data Collection for Social Navigation}, @@ -363,7 +363,7 @@ "berkeley_mvp": { "tasks_col": "language_instruction", "license": "mit", - "paper": "https://arxiv.org/abs/2203.06173", + "paper": "https://huggingface.co/papers/2203.06173", "citation_bibtex": dedent(r""" @InProceedings{Radosavovic2022, title = {Real-World Robot Learning with Masked Visual Pre-training}, @@ -375,7 +375,7 @@ "berkeley_rpt": { "tasks_col": "language_instruction", "license": "mit", - "paper": "https://arxiv.org/abs/2306.10007", + "paper": "https://huggingface.co/papers/2306.10007", "citation_bibtex": dedent(r""" @article{Radosavovic2023, title={Robot Learning with Sensorimotor Pre-training}, @@ -388,7 +388,7 @@ "tasks_col": "language_instruction", "license": "mit", "url": "https://human-world-model.github.io/", - "paper": "https://arxiv.org/abs/2308.10901", + "paper": "https://huggingface.co/papers/2308.10901", "citation_bibtex": dedent(r""" @inproceedings{mendonca2023structured, title={Structured World Models from Human Videos}, @@ -401,7 +401,7 @@ "tasks_col": "language_instruction", "license": "mit", "url": "https://play-fusion.github.io/", - "paper": "https://arxiv.org/abs/2312.04549", + "paper": "https://huggingface.co/papers/2312.04549", "citation_bibtex": dedent(r""" @inproceedings{chen2023playfusion, title={PlayFusion: Skill Acquisition via Diffusion from Language-Annotated Play}, @@ -414,7 +414,7 @@ "tasks_col": "language_instruction", "license": "mit", "url": "https://robo-affordances.github.io/", - "paper": "https://arxiv.org/abs/2304.08488", + "paper": "https://huggingface.co/papers/2304.08488", "citation_bibtex": dedent(r""" @inproceedings{bahl2023affordances, title={Affordances from Human Videos as a Versatile Representation for Robotics}, @@ -433,7 +433,7 @@ "tasks_col": "language_instruction", "license": "mit", "url": "https://diffusion-policy.cs.columbia.edu/", - "paper": "https://arxiv.org/abs/2303.04137v5", + "paper": "https://huggingface.co/papers/2303.04137", "citation_bibtex": dedent(r""" @inproceedings{chi2023diffusionpolicy, title={Diffusion Policy: Visuomotor Policy Learning via Action Diffusion}, @@ -505,7 +505,7 @@ "tasks_col": "language_instruction", "license": "mit", "url": "https://droid-dataset.github.io/", - "paper": "https://arxiv.org/abs/2403.12945", + "paper": "https://huggingface.co/papers/2403.12945", "citation_bibtex": dedent(r""" @article{khazatsky2024droid, title = {DROID: A Large-Scale In-The-Wild Robot Manipulation Dataset}, @@ -517,7 +517,7 @@ "tasks_col": "language_instruction", "license": "cc-by-4.0", "url": "https://functional-manipulation-benchmark.github.io/", - "paper": "https://arxiv.org/abs/2401.08553", + "paper": "https://huggingface.co/papers/2401.08553", "citation_bibtex": dedent(r""" @article{luo2024fmb, title={FMB: a Functional Manipulation Benchmark for Generalizable Robotic Learning}, @@ -530,7 +530,7 @@ 
"tasks_col": "language_instruction", "license": "mit", "url": "https://openreview.net/forum?id=WuBv9-IGDUA", - "paper": "https://arxiv.org/abs/2401.14502", + "paper": "https://huggingface.co/papers/2401.14502", "citation_bibtex": dedent(r""" @inproceedings{saxena2023multiresolution, title={Multi-Resolution Sensing for Real-Time Control with Vision-Language Models}, @@ -575,7 +575,7 @@ "tasks_col": "language_instruction", "license": "mit", "url": "https://jyopari.github.io/VINN/", - "paper": "https://arxiv.org/abs/2112.01511", + "paper": "https://huggingface.co/papers/2112.01511", "citation_bibtex": dedent(r""" @misc{pari2021surprising, title={The Surprising Effectiveness of Representation Learning for Visual Imitation}, @@ -590,7 +590,7 @@ "tasks_col": "language_instruction", "license": "mit", "url": "https://play-to-policy.github.io/", - "paper": "https://arxiv.org/abs/2210.10047", + "paper": "https://huggingface.co/papers/2210.10047", "citation_bibtex": dedent(r""" @article{cui2022play, title = {From Play to Policy: Conditional Behavior Generation from Uncurated Robot Data}, @@ -603,7 +603,7 @@ "tasks_col": "language_instruction", "license": "mit", "url": "https://rot-robot.github.io/", - "paper": "https://arxiv.org/abs/2206.15469", + "paper": "https://huggingface.co/papers/2206.15469", "citation_bibtex": dedent(r""" @inproceedings{haldar2023watch, title={Watch and match: Supercharging imitation with regularized optimal transport}, @@ -633,7 +633,7 @@ "tasks_col": "language_instruction", "license": "mit", "url": "https://sites.google.com/view/hydra-il-2023", - "paper": "https://arxiv.org/abs/2306.17237", + "paper": "https://huggingface.co/papers/2306.17237", "citation_bibtex": dedent(r""" @article{belkhale2023hydra, title={HYDRA: Hybrid Robot Actions for Imitation Learning}, @@ -646,21 +646,21 @@ "tasks_col": "language_instruction", "license": "mit", "url": "https://sites.google.com/view/visionandtouch", - "paper": "https://arxiv.org/abs/1810.10191", + "paper": "https://huggingface.co/papers/1810.10191", "citation_bibtex": dedent(r""" @inproceedings{lee2019icra, title={Making sense of vision and touch: Self-supervised learning of multimodal representations for contact-rich tasks}, author={Lee, Michelle A and Zhu, Yuke and Srinivasan, Krishnan and Shah, Parth and Savarese, Silvio and Fei-Fei, Li and Garg, Animesh and Bohg, Jeannette}, booktitle={2019 IEEE International Conference on Robotics and Automation (ICRA)}, year={2019}, - url={https://arxiv.org/abs/1810.10191} + url={https://huggingface.co/papers/1810.10191} }""").lstrip(), }, "stanford_robocook": { "tasks_col": "language_instruction", "license": "mit", "url": "https://hshi74.github.io/robocook/", - "paper": "https://arxiv.org/abs/2306.14447", + "paper": "https://huggingface.co/papers/2306.14447", "citation_bibtex": dedent(r""" @article{shi2023robocook, title={RoboCook: Long-Horizon Elasto-Plastic Object Manipulation with Diverse Tools}, @@ -673,7 +673,7 @@ "tasks_col": "language_instruction", "license": "cc-by-4.0", "url": "https://www.kaggle.com/datasets/oiermees/taco-robot", - "paper": "https://arxiv.org/abs/2209.08959, https://arxiv.org/abs/2210.01911", + "paper": "https://huggingface.co/papers/2209.08959, https://huggingface.co/papers/2210.01911", "citation_bibtex": dedent(r""" @inproceedings{rosete2022tacorl, author = {Erick Rosete-Beas and Oier Mees and Gabriel Kalweit and Joschka Boedecker and Wolfram Burgard}, @@ -693,7 +693,7 @@ "tasks_col": "language_instruction", "license": "mit", "url": "URL", - "paper": 
"https://arxiv.org/abs/2107.05842", + "paper": "https://huggingface.co/papers/2107.05842", "citation_bibtex": dedent(r""" @Article{Osa22, author = {Takayuki Osa}, @@ -709,7 +709,7 @@ "tasks_col": "language_instruction", "license": "mit", "url": "https://toto-benchmark.org/", - "paper": "https://arxiv.org/abs/2306.00942", + "paper": "https://huggingface.co/papers/2306.00942", "citation_bibtex": dedent(r""" @inproceedings{zhou2023train, author={Zhou, Gaoyue and Dean, Victoria and Srirama, Mohan Kumar and Rajeswaran, Aravind and Pari, Jyothish and Hatch, Kyle and Jain, Aryan and Yu, Tianhe and Abbeel, Pieter and Pinto, Lerrel and Finn, Chelsea and Gupta, Abhinav}, @@ -733,7 +733,7 @@ "tasks_col": "language_instruction", "license": "mit", "url": "https://owmcorl.github.io/#", - "paper": "https://arxiv.org/abs/2310.16029", + "paper": "https://huggingface.co/papers/2310.16029", "citation_bibtex": dedent(r""" @preprint{Feng2023Finetuning, title={Finetuning Offline World Models in the Real World}, @@ -745,7 +745,7 @@ "tasks_col": "language_instruction", "license": "mit", "url": "https://robopil.github.io/d3fields/", - "paper": "https://arxiv.org/abs/2309.16118", + "paper": "https://huggingface.co/papers/2309.16118", "citation_bibtex": dedent(r""" @article{wang2023d3field, title={D^3Field: Dynamic 3D Descriptor Fields for Generalizable Robotic Manipulation}, @@ -758,7 +758,7 @@ "tasks_col": "language_instruction", "license": "mit", "url": "https://uscresl.github.io/dmfd/", - "paper": "https://arxiv.org/abs/2207.10148", + "paper": "https://huggingface.co/papers/2207.10148", "citation_bibtex": dedent(r""" @article{salhotra2022dmfd, author={Salhotra, Gautam and Liu, I-Chun Arthur and Dominguez-Kuhne, Marcus and Sukhatme, Gaurav S.}, @@ -775,7 +775,7 @@ "tasks_col": "language_instruction", "license": "mit", "url": "https://ut-austin-rpl.github.io/MUTEX/", - "paper": "https://arxiv.org/abs/2309.14320", + "paper": "https://huggingface.co/papers/2309.14320", "citation_bibtex": dedent(r""" @inproceedings{shah2023mutex, title={{MUTEX}: Learning Unified Policies from Multimodal Task Specifications}, @@ -811,7 +811,7 @@ "tasks_col": "language_instruction", "license": "mit", "url": "https://saytap.github.io/", - "paper": "https://arxiv.org/abs/2306.07580", + "paper": "https://huggingface.co/papers/2306.07580", "citation_bibtex": dedent(r""" @article{saytap2023, author = {Yujin Tang and Wenhao Yu and Jie Tan and Heiga Zen and Aleksandra Faust and @@ -847,7 +847,7 @@ "tasks_col": "language_instruction", "license": "mit", "url": "https://ut-austin-rpl.github.io/VIOLA/", - "paper": "https://arxiv.org/abs/2210.11339", + "paper": "https://huggingface.co/papers/2210.11339", "citation_bibtex": dedent(r""" @article{zhu2022viola, title={VIOLA: Imitation Learning for Vision-Based Manipulation with Object Proposal Priors}, diff --git a/lerobot/common/policies/act/modeling_act.py b/lerobot/common/policies/act/modeling_act.py index 2623e16553..bbbc21b6e5 100644 --- a/lerobot/common/policies/act/modeling_act.py +++ b/lerobot/common/policies/act/modeling_act.py @@ -15,7 +15,7 @@ # limitations under the License. """Action Chunking Transformer Policy -As per Learning Fine-Grained Bimanual Manipulation with Low-Cost Hardware (https://arxiv.org/abs/2304.13705). +As per Learning Fine-Grained Bimanual Manipulation with Low-Cost Hardware (https://huggingface.co/papers/2304.13705). The majority of changes here involve removing unused code, unifying naming, and adding helpful comments. 
""" @@ -41,7 +41,7 @@ class ACTPolicy(PreTrainedPolicy): """ Action Chunking Transformer Policy as per Learning Fine-Grained Bimanual Manipulation with Low-Cost - Hardware (paper: https://arxiv.org/abs/2304.13705, code: https://github.com/tonyzhaozh/act) + Hardware (paper: https://huggingface.co/papers/2304.13705, code: https://github.com/tonyzhaozh/act) """ config_class = ACTConfig @@ -192,7 +192,7 @@ def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, dict]: # Calculate Dₖₗ(latent_pdf || standard_normal). Note: After computing the KL-divergence for # each dimension independently, we sum over the latent dimension to get the total # KL-divergence per batch element, then take the mean over the batch. - # (See App. B of https://arxiv.org/abs/1312.6114 for more details). + # (See App. B of https://huggingface.co/papers/1312.6114 for more details). mean_kld = ( (-0.5 * (1 + log_sigma_x2_hat - mu_hat.pow(2) - (log_sigma_x2_hat).exp())).sum(-1).mean() ) @@ -206,7 +206,7 @@ def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, dict]: class ACTTemporalEnsembler: def __init__(self, temporal_ensemble_coeff: float, chunk_size: int) -> None: - """Temporal ensembling as described in Algorithm 2 of https://arxiv.org/abs/2304.13705. + """Temporal ensembling as described in Algorithm 2 of https://huggingface.co/papers/2304.13705. The weights are calculated as wᵢ = exp(-temporal_ensemble_coeff * i) where w₀ is the oldest action. They are then normalized to sum to 1 by dividing by Σwᵢ. Here's some intuition around how the diff --git a/lerobot/common/policies/diffusion/configuration_diffusion.py b/lerobot/common/policies/diffusion/configuration_diffusion.py index e73c65fe9a..c8841f06b9 100644 --- a/lerobot/common/policies/diffusion/configuration_diffusion.py +++ b/lerobot/common/policies/diffusion/configuration_diffusion.py @@ -81,7 +81,7 @@ class DiffusionConfig(PreTrainedConfig): n_groups: Number of groups used in the group norm of the Unet's convolutional blocks. diffusion_step_embed_dim: The Unet is conditioned on the diffusion timestep via a small non-linear network. This is the output dimension of that network, i.e., the embedding dimension. - use_film_scale_modulation: FiLM (https://arxiv.org/abs/1709.07871) is used for the Unet conditioning. + use_film_scale_modulation: FiLM (https://huggingface.co/papers/1709.07871) is used for the Unet conditioning. Bias modulation is used be default, while this parameter indicates whether to also use scale modulation. noise_scheduler_type: Name of the noise scheduler to use. Supported options: ["DDPM", "DDIM"]. diff --git a/lerobot/common/policies/diffusion/modeling_diffusion.py b/lerobot/common/policies/diffusion/modeling_diffusion.py index 3edaf852bc..446e2cb6ef 100644 --- a/lerobot/common/policies/diffusion/modeling_diffusion.py +++ b/lerobot/common/policies/diffusion/modeling_diffusion.py @@ -48,7 +48,7 @@ class DiffusionPolicy(PreTrainedPolicy): """ Diffusion Policy as per "Diffusion Policy: Visuomotor Policy Learning via Action Diffusion" - (paper: https://arxiv.org/abs/2303.04137, code: https://github.com/real-stanford/diffusion_policy). + (paper: https://huggingface.co/papers/2303.04137, code: https://github.com/real-stanford/diffusion_policy). """ config_class = DiffusionConfig @@ -370,7 +370,7 @@ def compute_loss(self, batch: dict[str, Tensor]) -> Tensor: class SpatialSoftmax(nn.Module): """ Spatial Soft Argmax operation described in "Deep Spatial Autoencoders for Visuomotor Learning" by Finn et al. - (https://arxiv.org/pdf/1509.06113). 
A minimal port of the robomimic implementation. + (https://huggingface.co/papers/1509.06113). A minimal port of the robomimic implementation. At a high level, this takes 2D feature maps (from a convnet/ViT) and returns the "center of mass" of activations of each channel, i.e., keypoints in the image space for the policy to focus on. @@ -728,7 +728,7 @@ def __init__( self.conv1 = DiffusionConv1dBlock(in_channels, out_channels, kernel_size, n_groups=n_groups) - # FiLM modulation (https://arxiv.org/abs/1709.07871) outputs per-channel bias and (maybe) scale. + # FiLM modulation (https://huggingface.co/papers/1709.07871) outputs per-channel bias and (maybe) scale. cond_channels = out_channels * 2 if use_film_scale_modulation else out_channels self.cond_encoder = nn.Sequential(nn.Mish(), nn.Linear(cond_dim, cond_channels)) diff --git a/lerobot/common/policies/pi0fast/modeling_pi0fast.py b/lerobot/common/policies/pi0fast/modeling_pi0fast.py index a2df40f26f..7102bdded5 100644 --- a/lerobot/common/policies/pi0fast/modeling_pi0fast.py +++ b/lerobot/common/policies/pi0fast/modeling_pi0fast.py @@ -17,7 +17,7 @@ """ π0+FAST: Efficient Action Tokenization for Vision-Language-Action Models -[Paper](https://arxiv.org/abs/2501.09747) +[Paper](https://huggingface.co/papers/2501.09747) [Jax code](https://github.com/Physical-Intelligence/openpi) Designed by Physical Intelligence. Ported from Jax by Hugging Face. diff --git a/lerobot/common/policies/tdmpc/modeling_tdmpc.py b/lerobot/common/policies/tdmpc/modeling_tdmpc.py index 31220aa935..476e6decd2 100644 --- a/lerobot/common/policies/tdmpc/modeling_tdmpc.py +++ b/lerobot/common/policies/tdmpc/modeling_tdmpc.py @@ -17,8 +17,8 @@ """Implementation of Finetuning Offline World Models in the Real World. The comments in this code may sometimes refer to these references: - TD-MPC paper: Temporal Difference Learning for Model Predictive Control (https://arxiv.org/abs/2203.04955) - FOWM paper: Finetuning Offline World Models in the Real World (https://arxiv.org/abs/2310.16029) + TD-MPC paper: Temporal Difference Learning for Model Predictive Control (https://huggingface.co/papers/2203.04955) + FOWM paper: Finetuning Offline World Models in the Real World (https://huggingface.co/papers/2310.16029) """ # ruff: noqa: N806 diff --git a/lerobot/common/policies/vqbet/modeling_vqbet.py b/lerobot/common/policies/vqbet/modeling_vqbet.py index 97a08e2f4f..44006a5b21 100644 --- a/lerobot/common/policies/vqbet/modeling_vqbet.py +++ b/lerobot/common/policies/vqbet/modeling_vqbet.py @@ -162,7 +162,7 @@ def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, dict]: batch = dict(batch) # shallow copy so that adding a key doesn't modify the original batch["observation.images"] = torch.stack([batch[key] for key in self.config.image_features], dim=-4) batch = self.normalize_targets(batch) - # VQ-BeT discretizes action using VQ-VAE before training BeT (please refer to section 3.2 in the VQ-BeT paper https://arxiv.org/pdf/2403.03181) + # VQ-BeT discretizes action using VQ-VAE before training BeT (please refer to section 3.2 in the VQ-BeT paper https://huggingface.co/papers/2403.03181) if not self.vqbet.action_head.vqvae_model.discretized.item(): # loss: total loss of training RVQ # n_different_codes: how many of the total possible VQ codes are being used in single batch (how many of them have at least one encoder embedding as a nearest neighbor). This can be at most `vqvae_n_embed * number of layers of RVQ (=2)`. 
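The comment above refers to VQ-BeT discretizing action chunks with a residual VQ. As a rough illustration of what "residual" quantization means here, a standalone sketch (not LeRobot code; codebook sizes and names are made up):

```python
# Toy residual VQ encoder: each layer quantizes whatever the previous layers left over.
import torch


def residual_vq_encode(x: torch.Tensor, codebooks: list[torch.Tensor]):
    """x: (D,) action vector; codebooks: list of (K, D) tensors. Returns layer codes and reconstruction."""
    residual = x.clone()
    recon = torch.zeros_like(x)
    codes = []
    for cb in codebooks:
        dists = torch.cdist(residual.unsqueeze(0), cb)  # (1, K) distance to every code in this layer
        idx = dists.argmin(dim=-1)                       # nearest code
        quantized = cb[idx].squeeze(0)
        codes.append(int(idx))
        recon = recon + quantized
        residual = residual - quantized                  # the leftover goes to the next layer
    return codes, recon


torch.manual_seed(0)
codebooks = [torch.randn(16, 4), torch.randn(16, 4)]  # 2 RVQ layers, 16 codes each (made-up sizes)
action = torch.randn(4)
codes, recon = residual_vq_encode(action, codebooks)
print(codes, torch.linalg.norm(action - recon))
```

Each layer only has to explain the error left by the previous one, which is consistent with the comment above noting that the number of distinct codes in use can reach `vqvae_n_embed` times the number of RVQ layers.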
@@ -185,7 +185,7 @@ def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, dict]: class SpatialSoftmax(nn.Module): """ Spatial Soft Argmax operation described in "Deep Spatial Autoencoders for Visuomotor Learning" by Finn et al. - (https://arxiv.org/pdf/1509.06113). A minimal port of the robomimic implementation. + (https://huggingface.co/papers/1509.06113). A minimal port of the robomimic implementation. At a high level, this takes 2D feature maps (from a convnet/ViT) and returns the "center of mass" of activations of each channel, i.e., keypoints in the image space for the policy to focus on. @@ -387,7 +387,7 @@ def forward(self, batch: dict[str, Tensor], rollout: bool) -> tuple[dict, dict]: # only extract the output tokens at the position of action query: # Behavior Transformer (BeT), and VQ-BeT are both sequence-to-sequence prediction models, - # mapping sequential observation to sequential action (please refer to section 2.2 in BeT paper https://arxiv.org/pdf/2206.11251). + # mapping sequential observation to sequential action (please refer to section 2.2 in BeT paper https://huggingface.co/papers/2206.11251). # Thus, it predicts a historical action sequence, in addition to current and future actions (predicting future actions : optional). if len_additional_action_token > 0: features = torch.cat( @@ -824,8 +824,8 @@ def get_action_from_latent(self, latent): return einops.rearrange(output, "N (T A) -> N T A", A=self.config.action_feature.shape[0]) def get_code(self, state): - # in phase 2 of VQ-BeT training, we need a `ground truth labels of action data` to calculate the Focal loss for code prediction head. (please refer to section 3.3 in the paper https://arxiv.org/pdf/2403.03181) - # this function outputs the `GT code` of given action using frozen encoder and quantization layers. (please refer to Figure 2. in the paper https://arxiv.org/pdf/2403.03181) + # in phase 2 of VQ-BeT training, we need a `ground truth labels of action data` to calculate the Focal loss for code prediction head. (please refer to section 3.3 in the paper https://huggingface.co/papers/2403.03181) + # this function outputs the `GT code` of given action using frozen encoder and quantization layers. (please refer to Figure 2. in the paper https://huggingface.co/papers/2403.03181) state = einops.rearrange(state, "N T A -> N (T A)") with torch.no_grad(): state_rep = self.encoder(state) @@ -838,7 +838,7 @@ def get_code(self, state): return state_vq, vq_code def vqvae_forward(self, state): - # This function passes the given data through Residual VQ with Encoder and Decoder. Please refer to section 3.2 in the paper https://arxiv.org/pdf/2403.03181). + # This function passes the given data through Residual VQ with Encoder and Decoder. Please refer to section 3.2 in the paper https://huggingface.co/papers/2403.03181). state = einops.rearrange(state, "N T A -> N (T A)") # We start with passing action (or action chunk) at:t+n through the encoder ϕ. state_rep = self.encoder(state) diff --git a/lerobot/common/policies/vqbet/vqbet_utils.py b/lerobot/common/policies/vqbet/vqbet_utils.py index 139d119edc..09a86c07ba 100644 --- a/lerobot/common/policies/vqbet/vqbet_utils.py +++ b/lerobot/common/policies/vqbet/vqbet_utils.py @@ -336,7 +336,7 @@ class ResidualVQ(nn.Module): """ Residual VQ is composed of multiple VectorQuantize layers. - Follows Algorithm 1. in https://arxiv.org/pdf/2107.03312.pdf + Follows Algorithm 1. in https://huggingface.co/papers/2107.03312 "Residual Vector Quantizer (a.k.a. 
multi-stage vector quantizer [36]) cascades Nq layers of VQ as follows. The unquantized input vector is passed through a first VQ and quantization residuals are computed. The residuals are then iteratively quantized by a sequence of additional Nq -1 vector quantizers, as described in Algorithm 1." @@ -1006,7 +1006,7 @@ def gumbel_sample( if not straight_through or temperature <= 0.0 or not training: return ind, one_hot - # use reinmax for better second-order accuracy - https://arxiv.org/abs/2304.08612 + # use reinmax for better second-order accuracy - https://huggingface.co/papers/2304.08612 # algorithm 2 if reinmax: @@ -1156,7 +1156,7 @@ def batched_embedding(indices, embeds): def orthogonal_loss_fn(t): - # eq (2) from https://arxiv.org/abs/2112.00384 + # eq (2) from https://huggingface.co/papers/2112.00384 h, n = t.shape[:2] normed_codes = F.normalize(t, p=2, dim=-1) cosine_sim = einsum("h i d, h j d -> h i j", normed_codes, normed_codes) From be64bd2fe01dec20903e5b696d83b2f40270c539 Mon Sep 17 00:00:00 2001 From: Simon Alibert <75076266+aliberts@users.noreply.github.com> Date: Fri, 13 Jun 2025 11:06:45 +0200 Subject: [PATCH 44/88] Skip normalization parameters in load_smolvla (#1274) --- lerobot/common/policies/smolvla/modeling_smolvla.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/lerobot/common/policies/smolvla/modeling_smolvla.py b/lerobot/common/policies/smolvla/modeling_smolvla.py index a6745880b4..5e0a9622e0 100644 --- a/lerobot/common/policies/smolvla/modeling_smolvla.py +++ b/lerobot/common/policies/smolvla/modeling_smolvla.py @@ -157,9 +157,13 @@ def load_smolvla( state_dict, _ = standardise_state_dict(state_dict, set(model.state_dict().keys())) - missing, unexpected = model.load_state_dict(state_dict) + # HACK(aliberts): to not overwrite normalization parameters as they should come from the dataset + norm_keys = ("normalize_inputs", "normalize_targets", "unnormalize_outputs") + state_dict = {k: v for k, v in state_dict.items() if not k.startswith(norm_keys)} - if missing or unexpected: + missing, unexpected = model.load_state_dict(state_dict, strict=False) + + if not all(key.startswith(norm_keys) for key in missing) or unexpected: raise RuntimeError( "SmolVLA %d missing / %d unexpected keys", len(missing), From 8460c7cec2c4404ab600779caf0b5ee4c9b79e80 Mon Sep 17 00:00:00 2001 From: Steven Palma Date: Fri, 13 Jun 2025 12:41:30 +0200 Subject: [PATCH 45/88] fix(record): no teleop needed when running with policy (#1284) --- docs/source/getting_started_real_world_robot.mdx | 3 --- lerobot/record.py | 2 +- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/docs/source/getting_started_real_world_robot.mdx b/docs/source/getting_started_real_world_robot.mdx index 85f2311dbd..19392d4c89 100644 --- a/docs/source/getting_started_real_world_robot.mdx +++ b/docs/source/getting_started_real_world_robot.mdx @@ -297,9 +297,6 @@ python -m lerobot.record \ --robot.port=/dev/ttyACM1 \ --robot.cameras="{ up: {type: opencv, index_or_path: /dev/video10, width: 640, height: 480, fps: 30}, side: {type: intelrealsense, serial_number_or_name: 233522074606, width: 640, height: 480, fps: 30}}" \ --robot.id=my_awesome_follower_arm \ - --teleop.type=so100_leader \ - --teleop.port=/dev/ttyACM0 \ - --teleop.id=my_awesome_leader_arm \ --display_data=false \ --dataset.repo_id=$HF_USER/eval_so100 \ --dataset.single_task="Put lego brick into the transparent box" \ diff --git a/lerobot/record.py b/lerobot/record.py index 531846f297..884a3fcd6d 100644 --- a/lerobot/record.py 
+++ b/lerobot/record.py @@ -139,7 +139,7 @@ class RecordConfig: resume: bool = False def __post_init__(self): - if bool(self.teleop) == bool(self.policy): + if self.teleop is not None and self.policy is not None: raise ValueError("Choose either a policy or a teleoperator to control the robot") # HACK: We parse again the cli args here to get the pretrained path if there was one. From 0d2800d69e0bc34b3a4abd288eed6fe9eced2b66 Mon Sep 17 00:00:00 2001 From: Adil Zouitine Date: Fri, 13 Jun 2025 13:15:47 +0200 Subject: [PATCH 46/88] Port HIL SERL (#644) Co-authored-by: Michel Aractingi Co-authored-by: Eugene Mironov Co-authored-by: s1lent4gnt Co-authored-by: Ke Wang Co-authored-by: Yoel Chornton Co-authored-by: imstevenpmwork Co-authored-by: Simon Alibert --- .gitignore | 1 + README.md | 13 + docs/source/_toctree.yml | 4 + docs/source/hilserl.mdx | 547 +++++ docs/source/hilserl_sim.mdx | 120 + lerobot/common/constants.py | 1 + lerobot/common/envs/configs.py | 116 + lerobot/common/envs/factory.py | 4 +- lerobot/common/envs/utils.py | 19 +- lerobot/common/model/kinematics.py | 483 ++++ lerobot/common/optim/optimizers.py | 122 +- lerobot/common/policies/factory.py | 14 + lerobot/common/policies/normalize.py | 166 ++ .../common/policies/sac/configuration_sac.py | 245 ++ lerobot/common/policies/sac/modeling_sac.py | 1111 +++++++++ .../reward_model/configuration_classifier.py | 76 + .../sac/reward_model/modeling_classifier.py | 316 +++ .../common/robots/so100_follower/__init__.py | 3 +- .../so100_follower/config_so100_follower.py | 24 + .../so100_follower_end_effector.py | 193 ++ lerobot/common/robots/utils.py | 4 + .../common/teleoperators/gamepad/__init__.py | 18 + .../gamepad/configuration_gamepad.py | 25 + .../teleoperators/gamepad/gamepad_utils.py | 480 ++++ .../teleoperators/gamepad/teleop_gamepad.py | 138 ++ .../so101_leader/config_so101_leader.py | 2 + .../so101_leader/so101_leader.py | 11 +- lerobot/common/teleoperators/utils.py | 4 + lerobot/common/transport/services.proto | 59 + lerobot/common/transport/services_pb2.py | 45 + lerobot/common/transport/services_pb2_grpc.py | 233 ++ lerobot/common/transport/utils.py | 141 ++ lerobot/common/utils/buffer.py | 841 +++++++ lerobot/common/utils/import_utils.py | 4 + lerobot/common/utils/process.py | 83 + lerobot/common/utils/queue.py | 39 + lerobot/common/utils/transition.py | 85 + lerobot/common/utils/utils.py | 131 +- lerobot/common/utils/wandb_utils.py | 41 +- lerobot/configs/control.py | 134 - lerobot/configs/train.py | 5 + lerobot/configs/types.py | 1 + lerobot/scripts/find_joint_limits.py | 118 + lerobot/scripts/rl/actor.py | 709 ++++++ lerobot/scripts/rl/crop_dataset_roi.py | 314 +++ lerobot/scripts/rl/gym_manipulator.py | 2171 +++++++++++++++++ lerobot/scripts/rl/learner.py | 1206 +++++++++ lerobot/scripts/rl/learner_service.py | 118 + lerobot/teleoperate.py | 2 +- pyproject.toml | 6 +- tests/optim/test_optimizers.py | 192 +- .../hilserl/test_modeling_classifier.py | 139 ++ tests/policies/test_sac_config.py | 217 ++ tests/policies/test_sac_policy.py | 541 ++++ tests/rl/test_actor.py | 208 ++ tests/rl/test_actor_learner.py | 297 +++ tests/rl/test_learner_service.py | 374 +++ tests/transport/test_transport_utils.py | 571 +++++ tests/utils/test_process.py | 112 + tests/utils/test_queue.py | 150 ++ tests/utils/test_replay_buffer.py | 682 ++++++ 61 files changed, 14066 insertions(+), 163 deletions(-) create mode 100644 docs/source/hilserl.mdx create mode 100644 docs/source/hilserl_sim.mdx create mode 100644 lerobot/common/model/kinematics.py 
create mode 100644 lerobot/common/policies/sac/configuration_sac.py create mode 100644 lerobot/common/policies/sac/modeling_sac.py create mode 100644 lerobot/common/policies/sac/reward_model/configuration_classifier.py create mode 100644 lerobot/common/policies/sac/reward_model/modeling_classifier.py create mode 100644 lerobot/common/robots/so100_follower/so100_follower_end_effector.py create mode 100644 lerobot/common/teleoperators/gamepad/__init__.py create mode 100644 lerobot/common/teleoperators/gamepad/configuration_gamepad.py create mode 100644 lerobot/common/teleoperators/gamepad/gamepad_utils.py create mode 100644 lerobot/common/teleoperators/gamepad/teleop_gamepad.py create mode 100644 lerobot/common/transport/services.proto create mode 100644 lerobot/common/transport/services_pb2.py create mode 100644 lerobot/common/transport/services_pb2_grpc.py create mode 100644 lerobot/common/transport/utils.py create mode 100644 lerobot/common/utils/buffer.py create mode 100644 lerobot/common/utils/process.py create mode 100644 lerobot/common/utils/queue.py create mode 100644 lerobot/common/utils/transition.py delete mode 100644 lerobot/configs/control.py create mode 100644 lerobot/scripts/find_joint_limits.py create mode 100644 lerobot/scripts/rl/actor.py create mode 100644 lerobot/scripts/rl/crop_dataset_roi.py create mode 100644 lerobot/scripts/rl/gym_manipulator.py create mode 100644 lerobot/scripts/rl/learner.py create mode 100644 lerobot/scripts/rl/learner_service.py create mode 100644 tests/policies/hilserl/test_modeling_classifier.py create mode 100644 tests/policies/test_sac_config.py create mode 100644 tests/policies/test_sac_policy.py create mode 100644 tests/rl/test_actor.py create mode 100644 tests/rl/test_actor_learner.py create mode 100644 tests/rl/test_learner_service.py create mode 100644 tests/transport/test_transport_utils.py create mode 100644 tests/utils/test_process.py create mode 100644 tests/utils/test_queue.py create mode 100644 tests/utils/test_replay_buffer.py diff --git a/.gitignore b/.gitignore index 97b6af2f8d..4ab886933e 100644 --- a/.gitignore +++ b/.gitignore @@ -29,6 +29,7 @@ outputs # VS Code .vscode +.devcontainer # HPC nautilus/*.yaml diff --git a/README.md b/README.md index 8462634f45..e98f35663a 100644 --- a/README.md +++ b/README.md @@ -418,6 +418,19 @@ Additionally, if you are using any of the particular policy architecture, pretra year={2024} } ``` + + +- [HIL-SERL](https://hil-serl.github.io/) +```bibtex +@Article{luo2024hilserl, +title={Precise and Dexterous Robotic Manipulation via Human-in-the-Loop Reinforcement Learning}, +author={Jianlan Luo and Charles Xu and Jeffrey Wu and Sergey Levine}, +year={2024}, +eprint={2410.21845}, +archivePrefix={arXiv}, +primaryClass={cs.RO} +} +``` ## Star History [![Star History Chart](https://api.star-history.com/svg?repos=huggingface/lerobot&type=Timeline)](https://star-history.com/#huggingface/lerobot&Timeline) diff --git a/docs/source/_toctree.yml b/docs/source/_toctree.yml index 5e628dec37..37938358ff 100644 --- a/docs/source/_toctree.yml +++ b/docs/source/_toctree.yml @@ -9,6 +9,10 @@ title: Getting Started with Real-World Robots - local: cameras title: Cameras + - local: hilserl + title: Train a Robot with RL + - local: hilserl_sim + title: Train RL in Simulation title: "Tutorials" - sections: - local: so101 diff --git a/docs/source/hilserl.mdx b/docs/source/hilserl.mdx new file mode 100644 index 0000000000..149b25c687 --- /dev/null +++ b/docs/source/hilserl.mdx @@ -0,0 +1,547 @@ +# HIL-SERL Real Robot 
Training Workflow Guide + +In this tutorial you will go through the full Human-in-the-Loop Sample-Efficient Reinforcement Learning (HIL-SERL) workflow using LeRobot. You will master training a policy with RL on a real robot in just a few hours. + +HIL-SERL is a sample-efficient reinforcement learning algorithm that combines human demonstrations with online learning and human interventions. The approach starts from a small set of human demonstrations, uses them to train a reward classifier, and then employs an actor-learner architecture where humans can intervene during policy execution to guide exploration and correct unsafe behaviors. In this tutorial, you'll use a gamepad to provide interventions and control the robot during the learning process. + +It combines three key ingredients: + 1. **Offline demonstrations & reward classifier:** a handful of human-teleop episodes plus a vision-based success detector give the policy a shaped starting point. + 2. **On-robot actor / learner loop with human interventions:** a distributed Soft Actor Critic (SAC) learner updates the policy while an actor explores on the physical robot; the human can jump in at any time to correct dangerous or unproductive behaviour. + 3. **Safety & efficiency tools:** joint/end-effector (EE) bounds, crop region of interest (ROI) preprocessing and WandB monitoring keep the data useful and the hardware safe. + +Together these elements let HIL-SERL reach near-perfect task success and faster cycle times than imitation-only baselines. + +

+[Figure: HIL-SERL workflow, Luo et al. 2024]

+ +This guide provides step-by-step instructions for training a robot policy using LeRobot's HilSerl implementation to train on a real robot. + +## What do I need? + +- A gamepad (recommended) or keyboard to control the robot +- A Nvidia GPU +- A real robot with a follower and leader arm (optional if you use the keyboard or the gamepad) + +## What kind of tasks can I train? + +One can use HIL-SERL to train on a variety of manipulation tasks. Some recommendations: +- Start with a simple task to understand how the system works. + - Push cube to a goal region + - Pick and lift cube with the gripper +- Avoid extremely long horizon tasks. Focus on tasks that can be completed in 5-10 seconds. +- Once you have a good idea of how the system works, you can try more complex tasks and longer horizons. + - Pick and place cube + - Bimanual tasks to pick objects with two arms + - Hand-over tasks to transfer objects from one arm to another + - Go crazy! + +## Install LeRobot with HIL-SERL + +To install LeRobot with HIL-SERL, you need to install the `hilserl` extra. + +```bash +pip install -e ".[hilserl]" +``` + +## Real Robot Training Workflow + +### Understanding Configuration + +The training process begins with proper configuration for the HILSerl environment. The configuration class of interest is `HILSerlRobotEnvConfig` in `lerobot/common/envs/configs.py`. Which is defined as: + +```python +class HILSerlRobotEnvConfig(EnvConfig): + robot: RobotConfig | None = None # Main robot agent (defined in `lerobot/common/robots`) + teleop: TeleoperatorConfig | None = None # Teleoperator agent, e.g., gamepad or leader arm, (defined in `lerobot/common/teleoperators`) + wrapper: EnvTransformConfig | None = None # Environment wrapper settings; check `lerobot/scripts/server/gym_manipulator.py` + fps: int = 10 # Control frequency + name: str = "real_robot" # Environment name + mode: str = None # "record", "replay", or None (for training) + repo_id: str | None = None # LeRobot dataset repository ID + dataset_root: str | None = None # Local dataset root (optional) + task: str = "" # Task identifier + num_episodes: int = 10 # Number of episodes for recording + episode: int = 0 # episode index for replay + device: str = "cuda" # Compute device + push_to_hub: bool = True # Whether to push the recorded datasets to Hub + pretrained_policy_name_or_path: str | None = None # For policy loading + reward_classifier_pretrained_path: str | None = None # For reward model + number_of_steps_after_success: int = 0 # For reward classifier, collect more positive examples after a success to train a classifier +``` + + +### Finding Robot Workspace Bounds + +Before collecting demonstrations, you need to determine the appropriate operational bounds for your robot. + +This helps simplify the problem of learning on the real robot in two ways: 1) by limiting the robot's operational space to a specific region that solves the task and avoids unnecessary or unsafe exploration, and 2) by allowing training in end-effector space rather than joint space. Empirically, learning in joint space for reinforcement learning in manipulation is often a harder problem - some tasks are nearly impossible to learn in joint space but become learnable when the action space is transformed to end-effector coordinates. + +**Using find_joint_limits.py** + +This script helps you find the safe operational bounds for your robot's end-effector. 
Given that you have a follower and leader arm, you can use the script to find the bounds for the follower arm that will be applied during training. +Bounding the action space will reduce the redundant exploration of the agent and guarantees safety. + +```bash +python -m lerobot.scripts.find_joint_limits \ + --robot.type=so100_follower \ + --robot.port=/dev/tty.usbmodem58760431541 \ + --robot.id=black \ + --teleop.type=so100_leader \ + --teleop.port=/dev/tty.usbmodem58760431551 \ + --teleop.id=blue +``` + +**Workflow** + +1. Run the script and move the robot through the space that solves the task +2. The script will record the minimum and maximum end-effector positions and the joint angles and prints them to the console, for example: + ``` + Max ee position [0.2417 0.2012 0.1027] + Min ee position [0.1663 -0.0823 0.0336] + Max joint positions [-20.0, -20.0, -20.0, -20.0, -20.0, -20.0] + Min joint positions [50.0, 50.0, 50.0, 50.0, 50.0, 50.0] + ``` +3. Use these values in the configuration of your teleoperation device (TeleoperatorConfig) under the `end_effector_bounds` field + +**Example Configuration** + +```json +"end_effector_bounds": { + "max": [0.24, 0.20, 0.10], + "min": [0.16, -0.08, 0.03] +} +``` + +### Collecting Demonstrations + +With the bounds defined, you can safely collect demonstrations for training. Training RL with off-policy algorithm allows us to use offline datasets collected in order to improve the efficiency of the learning process. + +**Setting Up Record Mode** + +Create a configuration file for recording demonstrations (or edit an existing one like [env_config_so100.json](https://huggingface.co/datasets/aractingi/lerobot-example-config-files/blob/main/env_config_so100.json)): + +1. Set `mode` to `"record"` +2. Specify a unique `repo_id` for your dataset (e.g., "username/task_name") +3. Set `num_episodes` to the number of demonstrations you want to collect +4. Set `crop_params_dict` to `null` initially (we'll determine crops later) +5. Configure `robot`, `cameras`, and other hardware settings + +Example configuration section: +```json +"mode": "record", +"repo_id": "username/pick_lift_cube", +"dataset_root": null, +"task": "pick_and_lift", +"num_episodes": 15, +"episode": 0, +"push_to_hub": true +``` + +### Using a Teleoperation Device + +Along with your robot, you will need a teleoperation device to control it in order to collect datasets of your task and perform interventions during the online training. +We support using a gamepad or a keyboard or the leader arm of the robot. + +HIL-Serl learns actions in the end-effector space of the robot. Therefore, the teleoperation will control the end-effector's x,y,z displacements. + +For that we need to define a version of the robot that takes actions in the end-effector space. Check the robot class `SO100FollowerEndEffector` and its configuration `SO100FollowerEndEffectorConfig` for the default parameters related to the end-effector space. 
+ +```python +class SO100FollowerEndEffectorConfig(SO100FollowerConfig): + """Configuration for the SO100FollowerEndEffector robot.""" + + # Default bounds for the end-effector position (in meters) + end_effector_bounds: dict[str, list[float]] = field( # bounds for the end-effector in x,y,z direction + default_factory=lambda: { + "min": [-1.0, -1.0, -1.0], # min x, y, z + "max": [1.0, 1.0, 1.0], # max x, y, z + } + ) + + max_gripper_pos: float = 50 # maximum gripper position that the gripper will be open at + + end_effector_step_sizes: dict[str, float] = field( # maximum step size for the end-effector in x,y,z direction + default_factory=lambda: { + "x": 0.02, + "y": 0.02, + "z": 0.02, + } + ) +``` + +The `Teleoperator` defines the teleoperation device. You can check the list of available teleoperators in `lerobot/common/teleoperators`. + +**Setting up the Gamepad** + +The gamepad provides a very convenient way to control the robot and the episode state. + +To setup the gamepad, you need to set the `control_mode` to `"gamepad"` and define the `teleop` section in the configuration file. + +```json + "teleop": { + "type": "gamepad", + "use_gripper": true + }, +``` + +
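To make the fields above concrete, here is a minimal sketch of how a per-axis teleop command could be scaled by `end_effector_step_sizes` and clamped to `end_effector_bounds`. It is an illustration only, not the actual `SO100FollowerEndEffector` code; the helper name and the example numbers (taken from the bounds printed earlier in this tutorial) are assumptions.

```python
import numpy as np

# Illustrative values mirroring the config fields above and the bounds found with find_joint_limits.
END_EFFECTOR_STEP_SIZES = {"x": 0.02, "y": 0.02, "z": 0.02}
END_EFFECTOR_BOUNDS = {"min": np.array([0.16, -0.08, 0.03]), "max": np.array([0.24, 0.20, 0.10])}


def apply_ee_delta(current_xyz: np.ndarray, delta_cmd: np.ndarray) -> np.ndarray:
    """Scale a per-axis command in [-1, 1] by the step sizes and clamp the target to the workspace bounds."""
    step = np.array([END_EFFECTOR_STEP_SIZES[axis] for axis in ("x", "y", "z")])
    target = current_xyz + delta_cmd * step
    return np.clip(target, END_EFFECTOR_BOUNDS["min"], END_EFFECTOR_BOUNDS["max"])


# A full-scale push along +x from the middle of the workspace moves the target by one step (0.02 m).
print(apply_ee_delta(np.array([0.20, 0.05, 0.06]), np.array([1.0, 0.0, 0.0])))
```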

+[Figure: Gamepad button mapping for robot control and episode management]

+ +**Setting up the SO101 leader** + +The SO101 leader arm has reduced gears that allows it to move and track the follower arm during exploration. Therefore, taking over is much smoother than the gearless SO100. + +To setup the SO101 leader, you need to set the `control_mode` to `"leader"` and define the `teleop` section in the configuration file. + +```json + "teleop": { + "type": "so101_leader", + "port": "/dev/tty.usbmodem585A0077921", # check your port number + "use_degrees": true + }, +``` + +In order to annotate the success/failure of the episode, **you will need** to use a keyboard to press `s` for success, `esc` for failure. +During the online training, press `space` to take over the policy and `space` again to give the control back to the policy. + +
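For reference, a minimal sketch of how those key presses could be turned into episode annotations, using `pynput` as an assumed stand-in for LeRobot's actual keyboard handling:

```python
from pynput import keyboard

# Episode-level flags polled by the control loop (illustrative structure, not LeRobot's implementation).
events = {"success": False, "failure": False, "intervention": False}


def on_press(key):
    if getattr(key, "char", None) == "s":      # `s` marks the episode as successful
        events["success"] = True
    elif key == keyboard.Key.esc:              # `esc` marks it as a failure
        events["failure"] = True
    elif key == keyboard.Key.space:            # `space` toggles human take-over
        events["intervention"] = not events["intervention"]


listener = keyboard.Listener(on_press=on_press)
listener.start()  # runs in a background thread; the rollout loop reads `events` each step
```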
+[Video: SO101 leader teleoperation example, the leader tracks the follower, press `space` to intervene]
+ +**Recording Demonstrations** + +Start the recording process, an example of the config file can be found [here](https://huggingface.co/datasets/aractingi/lerobot-example-config-files/blob/main/env_config_so100.json): + +```bash +python lerobot/scripts/rl/gym_manipulator.py --config_path lerobot/configs/env_config_so100.json +``` + +During recording: +1. The robot will reset to the initial position defined in the configuration file `fixed_reset_joint_positions` +2. Complete the task successfully +3. The episode ends with a reward of 1 when you press the "success" button +4. If the time limit is reached, or the fail button is pressed, the episode ends with a reward of 0 +5. You can rerecord an episode by pressing the "rerecord" button +6. The process automatically continues to the next episode +7. After recording all episodes, the dataset is pushed to the Hugging Face Hub (optional) and saved locally + + +### Processing the Dataset + +After collecting demonstrations, process them to determine optimal camera crops. +Reinforcement learning is sensitive to background distractions, so it is important to crop the images to the relevant workspace area. + +Visual RL algorithms learn directly from pixel inputs, making them vulnerable to irrelevant visual information. Background elements like changing lighting, shadows, people moving, or objects outside the workspace can confuse the learning process. Good ROI selection should: +- Include only the essential workspace where the task happens +- Capture the robot's end-effector and all objects involved in the task +- Exclude unnecessary background elements and distractions + +Note: If you already know the crop parameters, you can skip this step and just set the `crop_params_dict` in the configuration file during recording. + +**Determining Crop Parameters** + +Use the `crop_dataset_roi.py` script to interactively select regions of interest in your camera images: + +```bash +python lerobot/scripts/rl/crop_dataset_roi.py --repo-id username/pick_lift_cube +``` + +1. For each camera view, the script will display the first frame +2. Draw a rectangle around the relevant workspace area +3. Press 'c' to confirm the selection +4. Repeat for all camera views +5. The script outputs cropping parameters and creates a new cropped dataset + +Example output: +``` +Selected Rectangular Regions of Interest (top, left, height, width): +observation.images.side: [180, 207, 180, 200] +observation.images.front: [180, 250, 120, 150] +``` + +
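To see how those `(top, left, height, width)` values are meant to be used, a small sketch of the crop-then-resize preprocessing with `torchvision` (an illustration, not the exact wrapper code):

```python
import torch
import torchvision.transforms.functional as F

# One ROI from the example output above, in (top, left, height, width) order.
top, left, height, width = 180, 207, 180, 200

frame = torch.rand(3, 480, 640)                 # stand-in for a 640x480 camera frame (C, H, W)
roi = F.crop(frame, top, left, height, width)   # keep only the workspace region
obs = F.resize(roi, [128, 128])                 # matches the recommended resize_size
print(obs.shape)                                # torch.Size([3, 128, 128])
```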

+[Figure: Interactive cropping tool for selecting regions of interest]

+ + +**Updating Configuration** + +Add these crop parameters to your training configuration: + +```json +"crop_params_dict": { + "observation.images.side": [180, 207, 180, 200], + "observation.images.front": [180, 250, 120, 150] +}, +"resize_size": [128, 128] +``` + +**Recommended image resolution** + +Most vision-based policies have been validated on square inputs of either **128×128** (default) or **64×64** pixels. We therefore advise setting the resize_size parameter to [128, 128] – or [64, 64] if you need to save GPU memory and bandwidth. Other resolutions are possible but have not been extensively tested. + + +### Training a Reward Classifier + +The reward classifier plays an important role in the HIL-SERL workflow by automating reward assignment and automatically detecting episode success. Instead of manually defining reward functions or relying on human feedback for every timestep, the reward classifier learns to predict success/failure from visual observations. This enables the RL algorithm to learn efficiently by providing consistent and automated reward signals based on the robot's camera inputs. + +This guide explains how to train a reward classifier for human-in-the-loop reinforcement learning implementation of LeRobot. Reward classifiers learn to predict the reward value given a state which can be used in an RL setup to train a policy. + +**Note**: Training a reward classifier is optional. You can start the first round of RL experiments by annotating the success manually with your gamepad or keyboard device. + +The reward classifier implementation in `modeling_classifier.py` uses a pretrained vision model to process the images. It can output either a single value for binary rewards to predict success/fail cases or multiple values for multi-class settings. + +**Collecting a Dataset for the reward classifier** + +Before training, you need to collect a dataset with labeled examples. The `record_dataset` function in `gym_manipulator.py` enables the process of collecting a dataset of observations, actions, and rewards. + +To collect a dataset, you need to modify some parameters in the environment configuration based on HILSerlRobotEnvConfig. + +```bash +python lerobot/scripts/rl/gym_manipulator.py --config_path lerobot/configs/reward_classifier_train_config.json +``` + +**Key Parameters for Data Collection** + +- **mode**: set it to `"record"` to collect a dataset +- **repo_id**: `"hf_username/dataset_name"`, name of the dataset and repo on the hub +- **num_episodes**: Number of episodes to record +- **number_of_steps_after_success**: Number of additional frames to record after a success (reward=1) is detected +- **fps**: Number of frames per second to record +- **push_to_hub**: Whether to push the dataset to the hub + +The `number_of_steps_after_success` parameter is crucial as it allows you to collect more positive examples. When a success is detected, the system will continue recording for the specified number of steps while maintaining the reward=1 label. Otherwise, there won't be enough states in the dataset labeled to 1 to train a good classifier. + +Example configuration section for data collection: + +```json +{ + "mode": "record", + "repo_id": "hf_username/dataset_name", + "dataset_root": "data/your_dataset", + "num_episodes": 20, + "push_to_hub": true, + "fps": 10, + "number_of_steps_after_success": 15 +} +``` + +**Reward Classifier Configuration** + +The reward classifier is configured using `configuration_classifier.py`. 
Here are the key parameters: + +- **model_name**: Base model architecture (e.g., we mainly use `"helper2424/resnet10"`) +- **model_type**: `"cnn"` or `"transformer"` +- **num_cameras**: Number of camera inputs +- **num_classes**: Number of output classes (typically 2 for binary success/failure) +- **hidden_dim**: Size of hidden representation +- **dropout_rate**: Regularization parameter +- **learning_rate**: Learning rate for optimizer + +Example configuration for training the [reward classifier](https://huggingface.co/datasets/aractingi/lerobot-example-config-files/blob/main/reward_classifier_train_config.json): + +```json +{ + "policy": { + "type": "reward_classifier", + "model_name": "helper2424/resnet10", + "model_type": "cnn", + "num_cameras": 2, + "num_classes": 2, + "hidden_dim": 256, + "dropout_rate": 0.1, + "learning_rate": 1e-4, + "device": "cuda", + "use_amp": true, + "input_features": { + "observation.images.front": { + "type": "VISUAL", + "shape": [3, 128, 128] + }, + "observation.images.side": { + "type": "VISUAL", + "shape": [3, 128, 128] + } + } + } +} +``` + +**Training the Classifier** + +To train the classifier, use the `train.py` script with your configuration: + +```bash +python lerobot/scripts/train.py --config_path path/to/reward_classifier_train_config.json +``` + +**Deploying and Testing the Model** + +To use your trained reward classifier, configure the `HILSerlRobotEnvConfig` to use your model: + +```python +env_config = HILSerlRobotEnvConfig( + reward_classifier_pretrained_path="path_to_your_pretrained_trained_model", + # Other environment parameters +) +``` +or set the argument in the json config file. + +```json +{ + "reward_classifier_pretrained_path": "path_to_your_pretrained_model" +} +``` + +Run `gym_manipulator.py` to test the model. +```bash +python lerobot/scripts/rl/gym_manipulator.py --config_path path/to/env_config.json +``` + +The reward classifier will automatically provide rewards based on the visual input from the robot's cameras. + +**Example Workflow for training the reward classifier** + +1. **Create the configuration files**: + Create the necessary json configuration files for the reward classifier and the environment. Check the examples [here](https://huggingface.co/datasets/aractingi/lerobot-example-config-files/tree/main). + +2. **Collect a dataset**: + ```bash + python lerobot/scripts/rl/gym_manipulator.py --config_path lerobot/configs/env_config.json + ``` + +3. **Train the classifier**: + ```bash + python lerobot/scripts/train.py --config_path lerobot/configs/reward_classifier_train_config.json + ``` + +4. **Test the classifier**: + ```bash + python lerobot/scripts/rl/gym_manipulator.py --config_path lerobot/configs/env_config.json + ``` + +### Training with Actor-Learner + +The LeRobot system uses a distributed actor-learner architecture for training. This architecture decouples robot interactions from the learning process, allowing them to run concurrently without blocking each other. The actor server handles robot observations and actions, sending interaction data to the learner server. The learner server performs gradient descent and periodically updates the actor's policy weights. You will need to start two processes: a learner and an actor. + +**Configuration Setup** + +Create a training configuration file (example available [here](https://huggingface.co/datasets/aractingi/lerobot-example-config-files/blob/main/train_config_hilserl_so100.json)). 
The training config is based on the main `TrainRLServerPipelineConfig` class in `lerobot/configs/train.py`. + +1. Configure the policy settings (`type="sac"`, `device`, etc.) +2. Set `dataset` to your cropped dataset +3. Configure environment settings with crop parameters +4. Check the other parameters related to SAC in [configuration_sac.py](https://github.com/huggingface/lerobot/blob/19bb621a7d0a31c20cd3cc08b1dbab68d3031454/lerobot/common/policies/sac/configuration_sac.py#L79). +5. Verify that the `policy` config is correct with the right `input_features` and `output_features` for your task. + +**Starting the Learner** + +First, start the learner server process: + +```bash +python lerobot/scripts/rl/learner.py --config_path lerobot/configs/train_config_hilserl_so100.json +``` + +The learner: +- Initializes the policy network +- Prepares replay buffers +- Opens a `gRPC` server to communicate with actors +- Processes transitions and updates the policy + +**Starting the Actor** + +In a separate terminal, start the actor process with the same configuration: + +```bash +python lerobot/scripts/rl/actor.py --config_path lerobot/configs/train_config_hilserl_so100.json +``` + +The actor: +- Connects to the learner via `gRPC` +- Initializes the environment +- Execute rollouts of the policy to collect experience +- Sends transitions to the learner +- Receives updated policy parameters + +**Training Flow** + +The training proceeds automatically: + +1. The actor executes the policy in the environment +2. Transitions are collected and sent to the learner +3. The learner updates the policy based on these transitions +4. Updated policy parameters are sent back to the actor +5. The process continues until the specified step limit is reached + +**Human in the Loop** + +- The key to learning efficiently is to have human interventions to provide corrective feedback and completing the task to aide the policy learning and exploration. +- To perform human interventions, you can press the upper right trigger button on the gamepad (or the `space` key on the keyboard). This will pause the policy actions and allow you to take over. +- A successful experiment is one where the human has to intervene at the start but then reduces the amount of interventions as the policy improves. You can monitor the intervention rate in the `wandb` dashboard. + +
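As a rough sketch of what taking over means inside the rollout loop (the real actor in `lerobot/scripts/rl/actor.py` is more involved; the names below are illustrative):

```python
import numpy as np


def select_action(policy_action: np.ndarray, teleop_action: np.ndarray | None, stats: dict) -> np.ndarray:
    """Use the human action while an intervention is active and count how often that happens."""
    stats["steps"] += 1
    if teleop_action is not None:               # the human pressed the take-over button
        stats["intervention_steps"] += 1
        return teleop_action
    return policy_action


stats = {"steps": 0, "intervention_steps": 0}
# After each episode: intervention_rate = stats["intervention_steps"] / max(stats["steps"], 1)
```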

+[Figure: Example showing how human interventions help guide policy learning over time]

+ +- The figure shows the plot of the episodic reward over interaction step. The figure shows the effect of human interventions on the policy learning. +- The orange curve is an experiment without any human interventions. While the pink and blue curves are experiments with human interventions. +- We can observe that the number of steps where the policy starts achieving the maximum reward is cut by a quarter when human interventions are present. + +**Monitoring and Debugging** + +If you have `wandb.enable` set to `true` in your configuration, you can monitor training progress in real-time through the [Weights & Biases](https://wandb.ai/site/) dashboard. + +### Guide to Human Interventions +The learning process is very sensitive to the intervention strategy. It will takes a few runs to understand how to intervene effectively. Some tips and hints: +- Allow the policy to explore for a few episodes at the start of training. +- Avoid intervening for long periods of time. Try to intervene in situation to correct the robot's behaviour when it goes off track. +- Once the policy starts achieving the task, even if its not perfect, you can limit your interventions to simple quick actions like a simple grasping commands. + +The ideal behaviour is that your intervention rate should drop gradually during training as shown in the figure below. + +

+[Figure: Plot of the intervention rate during a training run on a pick and lift cube task]

+ +### Key hyperparameters to tune + +Some configuration values have a disproportionate impact on training stability and speed: + +- **`temperature_init`** (`policy.temperature_init`) – initial entropy temperature in SAC. Higher values encourage more exploration; lower values make the policy more deterministic early on. A good starting point is `1e-2`. We observed that setting it too high can make human interventions ineffective and slow down learning. +- **`policy_parameters_push_frequency`** (`policy.actor_learner_config.policy_parameters_push_frequency`) – interval in *seconds* between two weight pushes from the learner to the actor. The default is `4 s`. Decrease to **1-2 s** to provide fresher weights (at the cost of more network traffic); increase only if your connection is slow, as this will reduce sample efficiency. +- **`storage_device`** (`policy.storage_device`) – device on which the learner keeps the policy parameters. If you have spare GPU memory, set this to `"cuda"` (instead of the default `"cpu"`). Keeping the weights on-GPU removes CPU→GPU transfer overhead and can significantly increase the number of learner updates per second. + + +Congrats 🎉, you have finished this tutorial! + +> [!TIP] +> If you have any questions or need help, please reach out on [Discord](https://discord.com/invite/s3KuuzsPFb). + +Paper citation: +``` +@article{luo2024precise, + title={Precise and Dexterous Robotic Manipulation via Human-in-the-Loop Reinforcement Learning}, + author={Luo, Jianlan and Xu, Charles and Wu, Jeffrey and Levine, Sergey}, + journal={arXiv preprint arXiv:2410.21845}, + year={2024} +} +``` diff --git a/docs/source/hilserl_sim.mdx b/docs/source/hilserl_sim.mdx new file mode 100644 index 0000000000..3239ba91ac --- /dev/null +++ b/docs/source/hilserl_sim.mdx @@ -0,0 +1,120 @@ +# Train RL in Simulation + +This guide explains how to use the `gym_hil` simulation environments as an alternative to real robots when working with the LeRobot framework for Human-In-the-Loop (HIL) reinforcement learning. + +`gym_hil` is a package that provides Gymnasium-compatible simulation environments specifically designed for Human-In-the-Loop reinforcement learning. These environments allow you to: + +- Train policies in simulation to test the RL stack before training on real robots + +- Collect demonstrations in sim using external devices like gamepads or keyboards +- Perform human interventions during policy learning + +Currently, the main environment is a Franka Panda robot simulation based on MuJoCo, with tasks like picking up a cube. + + +## Installation + +First, install the `gym_hil` package within the LeRobot environment: + +```bash +pip install -e ".[hilserl]" +``` + +## What do I need? + +- A gamepad or keyboard to control the robot +- A Nvidia GPU + + + +## Configuration + +To use `gym_hil` with LeRobot, you need to create a configuration file. An example is provided [here](https://huggingface.co/datasets/aractingi/lerobot-example-config-files/blob/main/gym_hil_env.json). 
Key configuration sections include: + +### Environment Type and Task + +```json +{ + "type": "hil", + "name": "franka_sim", + "task": "PandaPickCubeGamepad-v0", + "device": "cuda" +} +``` + +Available tasks: +- `PandaPickCubeBase-v0`: Basic environment +- `PandaPickCubeGamepad-v0`: With gamepad control +- `PandaPickCubeKeyboard-v0`: With keyboard control + +### Gym Wrappers Configuration + +```json +"wrapper": { + "gripper_penalty": -0.02, + "control_time_s": 15.0, + "use_gripper": true, + "fixed_reset_joint_positions": [0.0, 0.195, 0.0, -2.43, 0.0, 2.62, 0.785], + "end_effector_step_sizes": { + "x": 0.025, + "y": 0.025, + "z": 0.025 + }, + "control_mode": "gamepad" + } +``` + +Important parameters: +- `gripper_penalty`: Penalty for excessive gripper movement +- `use_gripper`: Whether to enable gripper control +- `end_effector_step_sizes`: Size of the steps in the x,y,z axes of the end-effector +- `control_mode`: Set to `"gamepad"` to use a gamepad controller + +## Running with HIL RL of LeRobot + +### Basic Usage + +To run the environment, set mode to null: + +```python +python lerobot/scripts/rl/gym_manipulator.py --config_path path/to/gym_hil_env.json +``` + +### Recording a Dataset + +To collect a dataset, set the mode to `record` whilst defining the repo_id and number of episodes to record: + +```python +python lerobot/scripts/rl/gym_manipulator.py --config_path path/to/gym_hil_env.json +``` + +### Training a Policy + +To train a policy, checkout the configuration example available [here](https://huggingface.co/datasets/aractingi/lerobot-example-config-files/blob/main/train_gym_hil_env.json) and run the actor and learner servers: + +```python +python lerobot/scripts/rl/actor.py --config_path path/to/train_gym_hil_env.json +``` + +In a different terminal, run the learner server: + +```python +python lerobot/scripts/rl/learner.py --config_path path/to/train_gym_hil_env.json +``` + +The simulation environment provides a safe and repeatable way to develop and test your Human-In-the-Loop reinforcement learning components before deploying to real robots. + +Congrats 🎉, you have finished this tutorial! + +> [!TIP] +> If you have any questions or need help, please reach out on [Discord](https://discord.com/invite/s3KuuzsPFb). 
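Before wiring everything into the actor and learner scripts, it can help to check that the simulation loads at all. A minimal sketch, assuming `gym_hil` registers the task IDs listed above when imported (the exact environment ID or namespace may differ):

```python
import gymnasium as gym
import gym_hil  # noqa: F401  (assumed to register the Panda environments on import)

env = gym.make("PandaPickCubeBase-v0")  # task ID as listed in the configuration section above
obs, info = env.reset()
print(env.action_space, env.observation_space)
env.close()
```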
+ +Paper citation: +``` +@article{luo2024precise, + title={Precise and Dexterous Robotic Manipulation via Human-in-the-Loop Reinforcement Learning}, + author={Luo, Jianlan and Xu, Charles and Wu, Jeffrey and Levine, Sergey}, + journal={arXiv preprint arXiv:2410.21845}, + year={2024} +} +``` diff --git a/lerobot/common/constants.py b/lerobot/common/constants.py index e78e748baf..990f2aa1eb 100644 --- a/lerobot/common/constants.py +++ b/lerobot/common/constants.py @@ -22,6 +22,7 @@ OBS_IMAGE = "observation.image" OBS_IMAGES = "observation.images" ACTION = "action" +REWARD = "next.reward" ROBOTS = "robots" TELEOPERATORS = "teleoperators" diff --git a/lerobot/common/envs/configs.py b/lerobot/common/envs/configs.py index c99fba811d..ea081e9fbf 100644 --- a/lerobot/common/envs/configs.py +++ b/lerobot/common/envs/configs.py @@ -14,10 +14,13 @@ import abc from dataclasses import dataclass, field +from typing import Any, Optional import draccus from lerobot.common.constants import ACTION, OBS_ENV_STATE, OBS_IMAGE, OBS_IMAGES, OBS_STATE +from lerobot.common.robots import RobotConfig +from lerobot.common.teleoperators.config import TeleoperatorConfig from lerobot.configs.types import FeatureType, PolicyFeature @@ -155,3 +158,116 @@ def gym_kwargs(self) -> dict: "visualization_height": self.visualization_height, "max_episode_steps": self.episode_length, } + + +@dataclass +class VideoRecordConfig: + """Configuration for video recording in ManiSkill environments.""" + + enabled: bool = False + record_dir: str = "videos" + trajectory_name: str = "trajectory" + + +@dataclass +class EnvTransformConfig: + """Configuration for environment wrappers.""" + + # ee_action_space_params: EEActionSpaceConfig = field(default_factory=EEActionSpaceConfig) + control_mode: str = "gamepad" + display_cameras: bool = False + add_joint_velocity_to_observation: bool = False + add_current_to_observation: bool = False + add_ee_pose_to_observation: bool = False + crop_params_dict: Optional[dict[str, tuple[int, int, int, int]]] = None + resize_size: Optional[tuple[int, int]] = None + control_time_s: float = 20.0 + fixed_reset_joint_positions: Optional[Any] = None + reset_time_s: float = 5.0 + use_gripper: bool = True + gripper_quantization_threshold: float | None = 0.8 + gripper_penalty: float = 0.0 + gripper_penalty_in_reward: bool = False + + +@EnvConfig.register_subclass(name="gym_manipulator") +@dataclass +class HILSerlRobotEnvConfig(EnvConfig): + """Configuration for the HILSerlRobotEnv environment.""" + + robot: Optional[RobotConfig] = None + teleop: Optional[TeleoperatorConfig] = None + wrapper: Optional[EnvTransformConfig] = None + fps: int = 10 + name: str = "real_robot" + mode: str = None # Either "record", "replay", None + repo_id: Optional[str] = None + dataset_root: Optional[str] = None + task: str = "" + num_episodes: int = 10 # only for record mode + episode: int = 0 + device: str = "cuda" + push_to_hub: bool = True + pretrained_policy_name_or_path: Optional[str] = None + reward_classifier_pretrained_path: Optional[str] = None + # For the reward classifier, to record more positive examples after a success + number_of_steps_after_success: int = 0 + + def gym_kwargs(self) -> dict: + return {} + + +@EnvConfig.register_subclass("hil") +@dataclass +class HILEnvConfig(EnvConfig): + """Configuration for the HIL environment.""" + + type: str = "hil" + name: str = "PandaPickCube" + task: str = "PandaPickCubeKeyboard-v0" + use_viewer: bool = True + gripper_penalty: float = 0.0 + use_gamepad: bool = True + state_dim: int = 
18 + action_dim: int = 4 + fps: int = 100 + episode_length: int = 100 + video_record: VideoRecordConfig = field(default_factory=VideoRecordConfig) + features: dict[str, PolicyFeature] = field( + default_factory=lambda: { + "action": PolicyFeature(type=FeatureType.ACTION, shape=(4,)), + "observation.image": PolicyFeature(type=FeatureType.VISUAL, shape=(3, 128, 128)), + "observation.state": PolicyFeature(type=FeatureType.STATE, shape=(18,)), + } + ) + features_map: dict[str, str] = field( + default_factory=lambda: { + "action": ACTION, + "observation.image": OBS_IMAGE, + "observation.state": OBS_STATE, + } + ) + ################# args from hilserlrobotenv + reward_classifier_pretrained_path: Optional[str] = None + robot_config: Optional[RobotConfig] = None + teleop_config: Optional[TeleoperatorConfig] = None + wrapper: Optional[EnvTransformConfig] = None + mode: str = None # Either "record", "replay", None + repo_id: Optional[str] = None + dataset_root: Optional[str] = None + num_episodes: int = 10 # only for record mode + episode: int = 0 + device: str = "cuda" + push_to_hub: bool = True + pretrained_policy_name_or_path: Optional[str] = None + # For the reward classifier, to record more positive examples after a success + number_of_steps_after_success: int = 0 + ############################ + + @property + def gym_kwargs(self) -> dict: + return { + "use_viewer": self.use_viewer, + "use_gamepad": self.use_gamepad, + "gripper_penalty": self.gripper_penalty, + } diff --git a/lerobot/common/envs/factory.py b/lerobot/common/envs/factory.py index 8450f84b95..4f5d59c698 100644 --- a/lerobot/common/envs/factory.py +++ b/lerobot/common/envs/factory.py @@ -17,7 +17,7 @@ import gymnasium as gym -from lerobot.common.envs.configs import AlohaEnv, EnvConfig, PushtEnv, XarmEnv +from lerobot.common.envs.configs import AlohaEnv, EnvConfig, HILEnvConfig, PushtEnv, XarmEnv def make_env_config(env_type: str, **kwargs) -> EnvConfig: @@ -27,6 +27,8 @@ def make_env_config(env_type: str, **kwargs) -> EnvConfig: return PushtEnv(**kwargs) elif env_type == "xarm": return XarmEnv(**kwargs) + elif env_type == "hil": + return HILEnvConfig(**kwargs) else: raise ValueError(f"Policy type '{env_type}' is not available.") diff --git a/lerobot/common/envs/utils.py b/lerobot/common/envs/utils.py index 83334f876d..66d6e5f93f 100644 --- a/lerobot/common/envs/utils.py +++ b/lerobot/common/envs/utils.py @@ -47,6 +47,10 @@ def preprocess_observation(observations: dict[str, np.ndarray]) -> dict[str, Ten # TODO(aliberts, rcadene): use transforms.ToTensor()? img = torch.from_numpy(img) + # When preprocessing observations in a non-vectorized environment, we need to add a batch dimension. + # This is the case for human-in-the-loop RL where there is only one environment. 
+ if img.ndim == 3: + img = img.unsqueeze(0) # sanity check that images are channel last _, h, w, c = img.shape assert c < h and c < w, f"expect channel last images, but instead got {img.shape=}" @@ -62,13 +66,18 @@ def preprocess_observation(observations: dict[str, np.ndarray]) -> dict[str, Ten return_observations[imgkey] = img if "environment_state" in observations: - return_observations["observation.environment_state"] = torch.from_numpy( - observations["environment_state"] - ).float() + env_state = torch.from_numpy(observations["environment_state"]).float() + if env_state.dim() == 1: + env_state = env_state.unsqueeze(0) + + return_observations["observation.environment_state"] = env_state # TODO(rcadene): enable pixels only baseline with `obs_type="pixels"` in environment by removing - # requirement for "agent_pos" - return_observations["observation.state"] = torch.from_numpy(observations["agent_pos"]).float() + agent_pos = torch.from_numpy(observations["agent_pos"]).float() + if agent_pos.dim() == 1: + agent_pos = agent_pos.unsqueeze(0) + return_observations["observation.state"] = agent_pos + return return_observations diff --git a/lerobot/common/model/kinematics.py b/lerobot/common/model/kinematics.py new file mode 100644 index 0000000000..367b609e19 --- /dev/null +++ b/lerobot/common/model/kinematics.py @@ -0,0 +1,483 @@ +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import numpy as np +from numpy.typing import NDArray +from scipy.spatial.transform import Rotation + + +def skew_symmetric(w: NDArray[np.float32]) -> NDArray[np.float32]: + """Creates the skew-symmetric matrix from a 3D vector.""" + return np.array([[0, -w[2], w[1]], [w[2], 0, -w[0]], [-w[1], w[0], 0]]) + + +def rodrigues_rotation(w: NDArray[np.float32], theta: float) -> NDArray[np.float32]: + """Computes the rotation matrix using Rodrigues' formula.""" + w_hat = skew_symmetric(w) + return np.eye(3) + np.sin(theta) * w_hat + (1 - np.cos(theta)) * w_hat @ w_hat + + +def screw_axis_to_transform(s: NDArray[np.float32], theta: float) -> NDArray[np.float32]: + """Converts a screw axis to a 4x4 transformation matrix.""" + screw_axis_rot = s[:3] + screw_axis_trans = s[3:] + + # Pure translation + if np.allclose(screw_axis_rot, 0) and np.linalg.norm(screw_axis_trans) == 1: + transform = np.eye(4) + transform[:3, 3] = screw_axis_trans * theta + + # Rotation (and potentially translation) + elif np.linalg.norm(screw_axis_rot) == 1: + w_hat = skew_symmetric(screw_axis_rot) + rot_mat = np.eye(3) + np.sin(theta) * w_hat + (1 - np.cos(theta)) * w_hat @ w_hat + t = ( + np.eye(3) * theta + (1 - np.cos(theta)) * w_hat + (theta - np.sin(theta)) * w_hat @ w_hat + ) @ screw_axis_trans + transform = np.eye(4) + transform[:3, :3] = rot_mat + transform[:3, 3] = t + else: + raise ValueError("Invalid screw axis parameters") + return transform + + +def pose_difference_se3(pose1: NDArray[np.float32], pose2: NDArray[np.float32]) -> NDArray[np.float32]: + """ + Calculates the SE(3) difference between two 4x4 homogeneous transformation matrices. + SE(3) (Special Euclidean Group) represents rigid body transformations in 3D space, + combining rotation (SO(3)) and translation. + + Each 4x4 matrix has the following structure: + [R11 R12 R13 tx] + [R21 R22 R23 ty] + [R31 R32 R33 tz] + [ 0 0 0 1] + + where R is the 3x3 rotation matrix and [tx,ty,tz] is the translation vector. + + Args: + pose1: A 4x4 numpy array representing the first pose. + pose2: A 4x4 numpy array representing the second pose. + + Returns: + A 6D numpy array concatenating translation and rotation differences. + First 3 elements are the translational difference (position). + Last 3 elements are the rotational difference in axis-angle representation. 
+ """ + rot1 = pose1[:3, :3] + rot2 = pose2[:3, :3] + + translation_diff = pose1[:3, 3] - pose2[:3, 3] + + # Calculate rotational difference using scipy's Rotation library + rot_diff = Rotation.from_matrix(rot1 @ rot2.T) + rotation_diff = rot_diff.as_rotvec() # Axis-angle representation + + return np.concatenate([translation_diff, rotation_diff]) + + +def se3_error(target_pose: NDArray[np.float32], current_pose: NDArray[np.float32]) -> NDArray[np.float32]: + pos_error = target_pose[:3, 3] - current_pose[:3, 3] + + rot_target = target_pose[:3, :3] + rot_current = current_pose[:3, :3] + rot_error_mat = rot_target @ rot_current.T + rot_error = Rotation.from_matrix(rot_error_mat).as_rotvec() + + return np.concatenate([pos_error, rot_error]) + + +class RobotKinematics: + """Robot kinematics class supporting multiple robot models.""" + + # Robot measurements dictionary + ROBOT_MEASUREMENTS = { + "koch": { + "gripper": [0.239, -0.001, 0.024], + "wrist": [0.209, 0, 0.024], + "forearm": [0.108, 0, 0.02], + "humerus": [0, 0, 0.036], + "shoulder": [0, 0, 0], + "base": [0, 0, 0.02], + }, + "moss": { + "gripper": [0.246, 0.013, 0.111], + "wrist": [0.245, 0.002, 0.064], + "forearm": [0.122, 0, 0.064], + "humerus": [0.001, 0.001, 0.063], + "shoulder": [0, 0, 0], + "base": [0, 0, 0.02], + }, + "so_old_calibration": { + "gripper": [0.320, 0, 0.050], + "wrist": [0.278, 0, 0.050], + "forearm": [0.143, 0, 0.044], + "humerus": [0.031, 0, 0.072], + "shoulder": [0, 0, 0], + "base": [0, 0, 0.02], + }, + "so_new_calibration": { + "gripper": [0.33, 0.0, 0.285], + "wrist": [0.30, 0.0, 0.267], + "forearm": [0.25, 0.0, 0.266], + "humerus": [0.06, 0.0, 0.264], + "shoulder": [0.0, 0.0, 0.238], + "base": [0.0, 0.0, 0.12], + }, + } + + def __init__(self, robot_type: str = "so100"): + """Initialize kinematics for the specified robot type. + + Args: + robot_type: String specifying the robot model ("koch", "so100", or "moss") + """ + if robot_type not in self.ROBOT_MEASUREMENTS: + raise ValueError( + f"Unknown robot type: {robot_type}. 
Available types: {list(self.ROBOT_MEASUREMENTS.keys())}" + ) + + self.robot_type = robot_type + self.measurements = self.ROBOT_MEASUREMENTS[robot_type] + + # Initialize all transformation matrices and screw axes + self._setup_transforms() + + def _create_translation_matrix( + self, x: float = 0.0, y: float = 0.0, z: float = 0.0 + ) -> NDArray[np.float32]: + """Create a 4x4 translation matrix.""" + return np.array([[1, 0, 0, x], [0, 1, 0, y], [0, 0, 1, z], [0, 0, 0, 1]]) + + def _setup_transforms(self): + """Setup all transformation matrices and screw axes for the robot.""" + # Set up rotation matrices (constant across robot types) + + # Gripper orientation + self.gripper_X0 = np.array( + [ + [1, 0, 0, 0], + [0, 0, 1, 0], + [0, -1, 0, 0], + [0, 0, 0, 1], + ], + dtype=np.float32, + ) + + # Wrist orientation + self.wrist_X0 = np.array( + [ + [0, -1, 0, 0], + [1, 0, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1], + ], + dtype=np.float32, + ) + + # Base orientation + self.base_X0 = np.array( + [ + [0, 0, 1, 0], + [1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 0, 1], + ], + dtype=np.float32, + ) + + # Gripper + # Screw axis of gripper frame wrt base frame + self.S_BG = np.array( + [ + 1, + 0, + 0, + 0, + self.measurements["gripper"][2], + -self.measurements["gripper"][1], + ], + dtype=np.float32, + ) + + # Gripper origin to centroid transform + self.X_GoGc = self._create_translation_matrix(x=0.07) + + # Gripper origin to tip transform + self.X_GoGt = self._create_translation_matrix(x=0.12) + + # 0-position gripper frame pose wrt base + self.X_BoGo = self._create_translation_matrix( + x=self.measurements["gripper"][0], + y=self.measurements["gripper"][1], + z=self.measurements["gripper"][2], + ) + + # Wrist + # Screw axis of wrist frame wrt base frame + self.S_BR = np.array( + [0, 1, 0, -self.measurements["wrist"][2], 0, self.measurements["wrist"][0]], dtype=np.float32 + ) + + # 0-position origin to centroid transform + self.X_RoRc = self._create_translation_matrix(x=0.0035, y=-0.002) + + # 0-position wrist frame pose wrt base + self.X_BR = self._create_translation_matrix( + x=self.measurements["wrist"][0], + y=self.measurements["wrist"][1], + z=self.measurements["wrist"][2], + ) + + # Forearm + # Screw axis of forearm frame wrt base frame + self.S_BF = np.array( + [ + 0, + 1, + 0, + -self.measurements["forearm"][2], + 0, + self.measurements["forearm"][0], + ], + dtype=np.float32, + ) + + # Forearm origin + centroid transform + self.X_ForearmFc = self._create_translation_matrix(x=0.036) + + # 0-position forearm frame pose wrt base + self.X_BF = self._create_translation_matrix( + x=self.measurements["forearm"][0], + y=self.measurements["forearm"][1], + z=self.measurements["forearm"][2], + ) + + # Humerus + # Screw axis of humerus frame wrt base frame + self.S_BH = np.array( + [ + 0, + -1, + 0, + self.measurements["humerus"][2], + 0, + -self.measurements["humerus"][0], + ], + dtype=np.float32, + ) + + # Humerus origin to centroid transform + self.X_HoHc = self._create_translation_matrix(x=0.0475) + + # 0-position humerus frame pose wrt base + self.X_BH = self._create_translation_matrix( + x=self.measurements["humerus"][0], + y=self.measurements["humerus"][1], + z=self.measurements["humerus"][2], + ) + + # Shoulder + # Screw axis of shoulder frame wrt Base frame + self.S_BS = np.array([0, 0, -1, 0, 0, 0], dtype=np.float32) + + # Shoulder origin to centroid transform + self.X_SoSc = self._create_translation_matrix(x=-0.017, z=0.0235) + + # 0-position shoulder frame pose wrt base + self.X_BS = 
self._create_translation_matrix( + x=self.measurements["shoulder"][0], + y=self.measurements["shoulder"][1], + z=self.measurements["shoulder"][2], + ) + + # Base + # Base origin to centroid transform + self.X_BoBc = self._create_translation_matrix(y=0.015) + + # World to base transform + self.X_WoBo = self._create_translation_matrix( + x=self.measurements["base"][0], + y=self.measurements["base"][1], + z=self.measurements["base"][2], + ) + + # Pre-compute gripper post-multiplication matrix + self._fk_gripper_post = self.X_GoGc @ self.X_BoGo @ self.gripper_X0 + + def forward_kinematics( + self, + robot_pos_deg: NDArray[np.float32], + frame: str = "gripper_tip", + ) -> NDArray[np.float32]: + """Generic forward kinematics. + + Args: + robot_pos_deg: Joint positions in degrees. Can be ``None`` when + computing the *base* frame as it does not depend on joint + angles. + frame: Target frame. One of + ``{"base", "shoulder", "humerus", "forearm", "wrist", "gripper", "gripper_tip"}``. + + Returns + ------- + NDArray[np.float32] + 4×4 homogeneous transformation matrix of the requested frame + expressed in the world coordinate system. + """ + frame = frame.lower() + if frame not in { + "base", + "shoulder", + "humerus", + "forearm", + "wrist", + "gripper", + "gripper_tip", + }: + raise ValueError( + f"Unknown frame '{frame}'. Valid options are base, shoulder, humerus, forearm, wrist, gripper, gripper_tip." + ) + + # Base frame does not rely on joint angles. + if frame == "base": + return self.X_WoBo @ self.X_BoBc @ self.base_X0 + + robot_pos_rad = robot_pos_deg / 180 * np.pi + + # Extract joint angles (note the sign convention for shoulder lift). + theta_shoulder_pan = robot_pos_rad[0] + theta_shoulder_lift = -robot_pos_rad[1] + theta_elbow_flex = robot_pos_rad[2] + theta_wrist_flex = robot_pos_rad[3] + theta_wrist_roll = robot_pos_rad[4] + + # Start with the world-to-base transform; incrementally add successive links. + transformation_matrix = self.X_WoBo @ screw_axis_to_transform(self.S_BS, theta_shoulder_pan) + if frame == "shoulder": + return transformation_matrix @ self.X_SoSc @ self.X_BS + + transformation_matrix = transformation_matrix @ screw_axis_to_transform( + self.S_BH, theta_shoulder_lift + ) + if frame == "humerus": + return transformation_matrix @ self.X_HoHc @ self.X_BH + + transformation_matrix = transformation_matrix @ screw_axis_to_transform(self.S_BF, theta_elbow_flex) + if frame == "forearm": + return transformation_matrix @ self.X_ForearmFc @ self.X_BF + + transformation_matrix = transformation_matrix @ screw_axis_to_transform(self.S_BR, theta_wrist_flex) + if frame == "wrist": + return transformation_matrix @ self.X_RoRc @ self.X_BR @ self.wrist_X0 + + transformation_matrix = transformation_matrix @ screw_axis_to_transform(self.S_BG, theta_wrist_roll) + if frame == "gripper": + return transformation_matrix @ self._fk_gripper_post + else: # frame == "gripper_tip" + return transformation_matrix @ self.X_GoGt @ self.X_BoGo @ self.gripper_X0 + + def compute_jacobian( + self, robot_pos_deg: NDArray[np.float32], frame: str = "gripper_tip" + ) -> NDArray[np.float32]: + """Finite differences to compute the Jacobian. + J(i, j) represents how the ith component of the end-effector's velocity changes wrt a small change + in the jth joint's velocity. 
+ + Args: + robot_pos_deg: Current joint positions in degrees + frame: Target frame to compute the Jacobian for (defaults to "gripper_tip") + """ + + eps = 1e-8 + jac = np.zeros(shape=(6, 5)) + delta = np.zeros(len(robot_pos_deg[:-1]), dtype=np.float64) + for el_ix in range(len(robot_pos_deg[:-1])): + delta *= 0 + delta[el_ix] = eps / 2 + sdot = ( + pose_difference_se3( + self.forward_kinematics(robot_pos_deg[:-1] + delta, frame), + self.forward_kinematics(robot_pos_deg[:-1] - delta, frame), + ) + / eps + ) + jac[:, el_ix] = sdot + return jac + + def compute_positional_jacobian( + self, robot_pos_deg: NDArray[np.float32], frame: str = "gripper_tip" + ) -> NDArray[np.float32]: + """Finite differences to compute the positional Jacobian. + J(i, j) represents how the ith component of the end-effector's position changes wrt a small change + in the jth joint's velocity. + + Args: + robot_pos_deg: Current joint positions in degrees + frame: Target frame to compute the Jacobian for (defaults to "gripper_tip") + """ + eps = 1e-8 + jac = np.zeros(shape=(3, 5)) + delta = np.zeros(len(robot_pos_deg[:-1]), dtype=np.float64) + for el_ix in range(len(robot_pos_deg[:-1])): + delta *= 0 + delta[el_ix] = eps / 2 + sdot = ( + self.forward_kinematics(robot_pos_deg[:-1] + delta, frame)[:3, 3] + - self.forward_kinematics(robot_pos_deg[:-1] - delta, frame)[:3, 3] + ) / eps + jac[:, el_ix] = sdot + return jac + + def ik( + self, + current_joint_pos: NDArray[np.float32], + desired_ee_pose: NDArray[np.float32], + position_only: bool = True, + frame: str = "gripper_tip", + max_iterations: int = 5, + learning_rate: float = 1, + ) -> NDArray[np.float32]: + """Inverse kinematics using gradient descent. + + Args: + current_joint_pos: Initial joint positions in degrees + desired_ee_pose: Target end-effector pose as a 4x4 transformation matrix + position_only: If True, only match end-effector position, not orientation + frame: Target frame. One of + ``{"base", "shoulder", "humerus", "forearm", "wrist", "gripper", "gripper_tip"}``. + max_iterations: Maximum number of iterations to run + learning_rate: Learning rate for gradient descent + + Returns: + Joint positions in degrees that achieve the desired end-effector pose + """ + # Do gradient descent. + current_joint_state = current_joint_pos.copy() + for _ in range(max_iterations): + current_ee_pose = self.forward_kinematics(current_joint_state, frame) + if not position_only: + error = se3_error(desired_ee_pose, current_ee_pose) + jac = self.compute_jacobian(current_joint_state, frame) + else: + error = desired_ee_pose[:3, 3] - current_ee_pose[:3, 3] + jac = self.compute_positional_jacobian(current_joint_state, frame) + delta_angles = np.linalg.pinv(jac) @ error + current_joint_state[:-1] += learning_rate * delta_angles + + if np.linalg.norm(error) < 5e-3: + return current_joint_state + return current_joint_state diff --git a/lerobot/common/optim/optimizers.py b/lerobot/common/optim/optimizers.py index 0cf4124ce6..903434f593 100644 --- a/lerobot/common/optim/optimizers.py +++ b/lerobot/common/optim/optimizers.py @@ -14,8 +14,9 @@ # See the License for the specific language governing permissions and # limitations under the License.
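As a quick illustration of how the kinematics helper above is meant to be used (forward kinematics for a chosen frame, then a few pseudoinverse-Jacobian steps for inverse kinematics), here is a minimal usage sketch. It is illustrative only: the class name `RobotKinematics`, the import path, and the `"so101"` robot type are assumptions, while the method names and signatures come from the code in this patch.

    # Minimal usage sketch (not part of the patch); class and module names are assumed.
    import numpy as np
    from lerobot.common.model.kinematics import RobotKinematics  # hypothetical import path

    kin = RobotKinematics(robot_type="so101")  # placeholder: must be a key of ROBOT_MEASUREMENTS

    # Joint positions in degrees; the last entry (gripper) is carried through untouched by ik().
    joints_deg = np.array([0.0, -20.0, 40.0, 10.0, 0.0, 30.0], dtype=np.float32)

    # 4x4 pose of the gripper tip expressed in the world frame.
    ee_pose = kin.forward_kinematics(joints_deg, frame="gripper_tip")

    # Move the tip 2 cm up and solve for joint angles with a few gradient steps.
    target = ee_pose.copy()
    target[2, 3] += 0.02
    new_joints_deg = kin.ik(joints_deg, target, position_only=True, max_iterations=10)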
import abc -from dataclasses import asdict, dataclass +from dataclasses import asdict, dataclass, field from pathlib import Path +from typing import Any import draccus import torch @@ -44,7 +45,16 @@ def default_choice_name(cls) -> str | None: return "adam" @abc.abstractmethod - def build(self) -> torch.optim.Optimizer: + def build(self) -> torch.optim.Optimizer | dict[str, torch.optim.Optimizer]: + """ + Build the optimizer. It can be a single optimizer or a dictionary of optimizers. + NOTE: Multiple optimizers are useful when you have different models to optimize. + For example, you can have one optimizer for the policy and another one for the value function + in reinforcement learning settings. + + Returns: + The optimizer or a dictionary of optimizers. + """ raise NotImplementedError @@ -94,7 +104,76 @@ def build(self, params: dict) -> torch.optim.Optimizer: return torch.optim.SGD(params, **kwargs) -def save_optimizer_state(optimizer: torch.optim.Optimizer, save_dir: Path) -> None: +@OptimizerConfig.register_subclass("multi_adam") +@dataclass +class MultiAdamConfig(OptimizerConfig): + """Configuration for multiple Adam optimizers with different parameter groups. + + This creates a dictionary of Adam optimizers, each with its own hyperparameters. + + Args: + lr: Default learning rate (used if not specified for a group) + weight_decay: Default weight decay (used if not specified for a group) + optimizer_groups: Dictionary mapping parameter group names to their hyperparameters + grad_clip_norm: Gradient clipping norm + """ + + lr: float = 1e-3 + weight_decay: float = 0.0 + grad_clip_norm: float = 10.0 + optimizer_groups: dict[str, dict[str, Any]] = field(default_factory=dict) + + def build(self, params_dict: dict[str, list]) -> dict[str, torch.optim.Optimizer]: + """Build multiple Adam optimizers. + + Args: + params_dict: Dictionary mapping parameter group names to lists of parameters + The keys should match the keys in optimizer_groups + + Returns: + Dictionary mapping parameter group names to their optimizers + """ + optimizers = {} + + for name, params in params_dict.items(): + # Get group-specific hyperparameters or use defaults + group_config = self.optimizer_groups.get(name, {}) + + # Create optimizer with merged parameters (defaults + group-specific) + optimizer_kwargs = { + "lr": group_config.get("lr", self.lr), + "betas": group_config.get("betas", (0.9, 0.999)), + "eps": group_config.get("eps", 1e-5), + "weight_decay": group_config.get("weight_decay", self.weight_decay), + } + + optimizers[name] = torch.optim.Adam(params, **optimizer_kwargs) + + return optimizers + + +def save_optimizer_state( + optimizer: torch.optim.Optimizer | dict[str, torch.optim.Optimizer], save_dir: Path +) -> None: + """Save optimizer state to disk. + + Args: + optimizer: Either a single optimizer or a dictionary of optimizers. + save_dir: Directory to save the optimizer state. 
+ """ + if isinstance(optimizer, dict): + # Handle dictionary of optimizers + for name, opt in optimizer.items(): + optimizer_dir = save_dir / name + optimizer_dir.mkdir(exist_ok=True, parents=True) + _save_single_optimizer_state(opt, optimizer_dir) + else: + # Handle single optimizer + _save_single_optimizer_state(optimizer, save_dir) + + +def _save_single_optimizer_state(optimizer: torch.optim.Optimizer, save_dir: Path) -> None: + """Save a single optimizer's state to disk.""" state = optimizer.state_dict() param_groups = state.pop("param_groups") flat_state = flatten_dict(state) @@ -102,11 +181,44 @@ def save_optimizer_state(optimizer: torch.optim.Optimizer, save_dir: Path) -> No write_json(param_groups, save_dir / OPTIMIZER_PARAM_GROUPS) -def load_optimizer_state(optimizer: torch.optim.Optimizer, save_dir: Path) -> torch.optim.Optimizer: +def load_optimizer_state( + optimizer: torch.optim.Optimizer | dict[str, torch.optim.Optimizer], save_dir: Path +) -> torch.optim.Optimizer | dict[str, torch.optim.Optimizer]: + """Load optimizer state from disk. + + Args: + optimizer: Either a single optimizer or a dictionary of optimizers. + save_dir: Directory to load the optimizer state from. + + Returns: + The updated optimizer(s) with loaded state. + """ + if isinstance(optimizer, dict): + # Handle dictionary of optimizers + loaded_optimizers = {} + for name, opt in optimizer.items(): + optimizer_dir = save_dir / name + if optimizer_dir.exists(): + loaded_optimizers[name] = _load_single_optimizer_state(opt, optimizer_dir) + else: + loaded_optimizers[name] = opt + return loaded_optimizers + else: + # Handle single optimizer + return _load_single_optimizer_state(optimizer, save_dir) + + +def _load_single_optimizer_state(optimizer: torch.optim.Optimizer, save_dir: Path) -> torch.optim.Optimizer: + """Load a single optimizer's state from disk.""" current_state_dict = optimizer.state_dict() flat_state = load_file(save_dir / OPTIMIZER_STATE) state = unflatten_dict(flat_state) - loaded_state_dict = {"state": {int(k): v for k, v in state["state"].items()}} + + # Handle case where 'state' key might not exist (for newly created optimizers) + if "state" in state: + loaded_state_dict = {"state": {int(k): v for k, v in state["state"].items()}} + else: + loaded_state_dict = {"state": {}} if "param_groups" in current_state_dict: param_groups = deserialize_json_into_object( diff --git a/lerobot/common/policies/factory.py b/lerobot/common/policies/factory.py index 3aade06656..682bb8cee9 100644 --- a/lerobot/common/policies/factory.py +++ b/lerobot/common/policies/factory.py @@ -27,6 +27,8 @@ from lerobot.common.policies.pi0.configuration_pi0 import PI0Config from lerobot.common.policies.pi0fast.configuration_pi0fast import PI0FASTConfig from lerobot.common.policies.pretrained import PreTrainedPolicy +from lerobot.common.policies.sac.configuration_sac import SACConfig +from lerobot.common.policies.sac.reward_model.configuration_classifier import RewardClassifierConfig from lerobot.common.policies.smolvla.configuration_smolvla import SmolVLAConfig from lerobot.common.policies.tdmpc.configuration_tdmpc import TDMPCConfig from lerobot.common.policies.vqbet.configuration_vqbet import VQBeTConfig @@ -60,6 +62,14 @@ def get_policy_class(name: str) -> PreTrainedPolicy: from lerobot.common.policies.pi0fast.modeling_pi0fast import PI0FASTPolicy return PI0FASTPolicy + elif name == "sac": + from lerobot.common.policies.sac.modeling_sac import SACPolicy + + return SACPolicy + elif name == "reward_classifier": + from 
lerobot.common.policies.sac.reward_model.modeling_classifier import Classifier + + return Classifier elif name == "smolvla": from lerobot.common.policies.smolvla.modeling_smolvla import SmolVLAPolicy @@ -81,8 +91,12 @@ def make_policy_config(policy_type: str, **kwargs) -> PreTrainedConfig: return PI0Config(**kwargs) elif policy_type == "pi0fast": return PI0FASTConfig(**kwargs) + elif policy_type == "sac": + return SACConfig(**kwargs) elif policy_type == "smolvla": return SmolVLAConfig(**kwargs) + elif policy_type == "reward_classifier": + return RewardClassifierConfig(**kwargs) else: raise ValueError(f"Policy type '{policy_type}' is not available.") diff --git a/lerobot/common/policies/normalize.py b/lerobot/common/policies/normalize.py index b3255ec106..9cc94b9298 100644 --- a/lerobot/common/policies/normalize.py +++ b/lerobot/common/policies/normalize.py @@ -151,6 +151,7 @@ def __init__( # TODO(rcadene): should we remove torch.no_grad? @torch.no_grad def forward(self, batch: dict[str, Tensor]) -> dict[str, Tensor]: + # TODO: Remove this shallow copy batch = dict(batch) # shallow copy avoids mutating the input batch for key, ft in self.features.items(): if key not in batch: @@ -252,3 +253,168 @@ def forward(self, batch: dict[str, Tensor]) -> dict[str, Tensor]: else: raise ValueError(norm_mode) return batch + + +# TODO (azouitine): We should replace all normalization on the policies with register_buffer normalization +# and remove the `Normalize` and `Unnormalize` classes. +def _initialize_stats_buffers( + module: nn.Module, + features: dict[str, PolicyFeature], + norm_map: dict[str, NormalizationMode], + stats: dict[str, dict[str, Tensor]] | None = None, +) -> None: + """Register statistics buffers (mean/std or min/max) on the given *module*. + + The logic matches the previous constructors of `NormalizeBuffer` and `UnnormalizeBuffer`, + but is factored out so it can be reused by both classes and stay in sync. + """ + for key, ft in features.items(): + norm_mode = norm_map.get(ft.type, NormalizationMode.IDENTITY) + if norm_mode is NormalizationMode.IDENTITY: + continue + + shape: tuple[int, ...] = tuple(ft.shape) + if ft.type is FeatureType.VISUAL: + # reduce spatial dimensions, keep channel dimension only + c, *_ = shape + shape = (c, 1, 1) + + prefix = key.replace(".", "_") + + if norm_mode is NormalizationMode.MEAN_STD: + mean = torch.full(shape, torch.inf, dtype=torch.float32) + std = torch.full(shape, torch.inf, dtype=torch.float32) + + if stats and key in stats and "mean" in stats[key] and "std" in stats[key]: + mean_data = stats[key]["mean"] + std_data = stats[key]["std"] + if isinstance(mean_data, torch.Tensor): + # Note: The clone is needed to make sure that the logic in save_pretrained doesn't see duplicated + # tensors anywhere (for example, when we use the same stats for normalization and + # unnormalization). See the logic here + # https://github.com/huggingface/safetensors/blob/079781fd0dc455ba0fe851e2b4507c33d0c0d407/bindings/python/py_src/safetensors/torch.py#L97. 
+ mean = mean_data.clone().to(dtype=torch.float32) + std = std_data.clone().to(dtype=torch.float32) + else: + raise ValueError(f"Unsupported stats type for key '{key}' (expected ndarray or Tensor).") + + module.register_buffer(f"{prefix}_mean", mean) + module.register_buffer(f"{prefix}_std", std) + continue + + if norm_mode is NormalizationMode.MIN_MAX: + min_val = torch.full(shape, torch.inf, dtype=torch.float32) + max_val = torch.full(shape, torch.inf, dtype=torch.float32) + + if stats and key in stats and "min" in stats[key] and "max" in stats[key]: + min_data = stats[key]["min"] + max_data = stats[key]["max"] + if isinstance(min_data, torch.Tensor): + min_val = min_data.clone().to(dtype=torch.float32) + max_val = max_data.clone().to(dtype=torch.float32) + else: + raise ValueError(f"Unsupported stats type for key '{key}' (expected ndarray or Tensor).") + + module.register_buffer(f"{prefix}_min", min_val) + module.register_buffer(f"{prefix}_max", max_val) + continue + + raise ValueError(norm_mode) + + +class NormalizeBuffer(nn.Module): + """Same as `Normalize` but statistics are stored as registered buffers rather than parameters.""" + + def __init__( + self, + features: dict[str, PolicyFeature], + norm_map: dict[str, NormalizationMode], + stats: dict[str, dict[str, Tensor]] | None = None, + ): + super().__init__() + self.features = features + self.norm_map = norm_map + + _initialize_stats_buffers(self, features, norm_map, stats) + + def forward(self, batch: dict[str, Tensor]) -> dict[str, Tensor]: + batch = dict(batch) + for key, ft in self.features.items(): + if key not in batch: + continue + + norm_mode = self.norm_map.get(ft.type, NormalizationMode.IDENTITY) + if norm_mode is NormalizationMode.IDENTITY: + continue + + prefix = key.replace(".", "_") + + if norm_mode is NormalizationMode.MEAN_STD: + mean = getattr(self, f"{prefix}_mean") + std = getattr(self, f"{prefix}_std") + assert not torch.isinf(mean).any(), _no_stats_error_str("mean") + assert not torch.isinf(std).any(), _no_stats_error_str("std") + batch[key] = (batch[key] - mean) / (std + 1e-8) + continue + + if norm_mode is NormalizationMode.MIN_MAX: + min_val = getattr(self, f"{prefix}_min") + max_val = getattr(self, f"{prefix}_max") + assert not torch.isinf(min_val).any(), _no_stats_error_str("min") + assert not torch.isinf(max_val).any(), _no_stats_error_str("max") + batch[key] = (batch[key] - min_val) / (max_val - min_val + 1e-8) + batch[key] = batch[key] * 2 - 1 + continue + + raise ValueError(norm_mode) + + return batch + + +class UnnormalizeBuffer(nn.Module): + """Inverse operation of `NormalizeBuffer`. 
Uses registered buffers for statistics.""" + + def __init__( + self, + features: dict[str, PolicyFeature], + norm_map: dict[str, NormalizationMode], + stats: dict[str, dict[str, Tensor]] | None = None, + ): + super().__init__() + self.features = features + self.norm_map = norm_map + + _initialize_stats_buffers(self, features, norm_map, stats) + + def forward(self, batch: dict[str, Tensor]) -> dict[str, Tensor]: + # batch = dict(batch) + for key, ft in self.features.items(): + if key not in batch: + continue + + norm_mode = self.norm_map.get(ft.type, NormalizationMode.IDENTITY) + if norm_mode is NormalizationMode.IDENTITY: + continue + + prefix = key.replace(".", "_") + + if norm_mode is NormalizationMode.MEAN_STD: + mean = getattr(self, f"{prefix}_mean") + std = getattr(self, f"{prefix}_std") + assert not torch.isinf(mean).any(), _no_stats_error_str("mean") + assert not torch.isinf(std).any(), _no_stats_error_str("std") + batch[key] = batch[key] * std + mean + continue + + if norm_mode is NormalizationMode.MIN_MAX: + min_val = getattr(self, f"{prefix}_min") + max_val = getattr(self, f"{prefix}_max") + assert not torch.isinf(min_val).any(), _no_stats_error_str("min") + assert not torch.isinf(max_val).any(), _no_stats_error_str("max") + batch[key] = (batch[key] + 1) / 2 + batch[key] = batch[key] * (max_val - min_val) + min_val + continue + + raise ValueError(norm_mode) + + return batch diff --git a/lerobot/common/policies/sac/configuration_sac.py b/lerobot/common/policies/sac/configuration_sac.py new file mode 100644 index 0000000000..db58beb2f0 --- /dev/null +++ b/lerobot/common/policies/sac/configuration_sac.py @@ -0,0 +1,245 @@ +# !/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass, field + +from lerobot.common.constants import ACTION, OBS_IMAGE, OBS_STATE +from lerobot.common.optim.optimizers import MultiAdamConfig +from lerobot.configs.policies import PreTrainedConfig +from lerobot.configs.types import NormalizationMode + + +def is_image_feature(key: str) -> bool: + """Check if a feature key represents an image feature. + + Args: + key: The feature key to check + + Returns: + True if the key represents an image feature, False otherwise + """ + return key.startswith(OBS_IMAGE) + + +@dataclass +class ConcurrencyConfig: + """Configuration for the concurrency of the actor and learner. + Possible values are: + - "threads": Use threads for the actor and learner. + - "processes": Use processes for the actor and learner. 
+ """ + + actor: str = "threads" + learner: str = "threads" + + +@dataclass +class ActorLearnerConfig: + learner_host: str = "127.0.0.1" + learner_port: int = 50051 + policy_parameters_push_frequency: int = 4 + queue_get_timeout: float = 2 + + +@dataclass +class CriticNetworkConfig: + hidden_dims: list[int] = field(default_factory=lambda: [256, 256]) + activate_final: bool = True + final_activation: str | None = None + + +@dataclass +class ActorNetworkConfig: + hidden_dims: list[int] = field(default_factory=lambda: [256, 256]) + activate_final: bool = True + + +@dataclass +class PolicyConfig: + use_tanh_squash: bool = True + std_min: float = 1e-5 + std_max: float = 10.0 + init_final: float = 0.05 + + +@PreTrainedConfig.register_subclass("sac") +@dataclass +class SACConfig(PreTrainedConfig): + """Soft Actor-Critic (SAC) configuration. + + SAC is an off-policy actor-critic deep RL algorithm based on the maximum entropy + reinforcement learning framework. It learns a policy and a Q-function simultaneously + using experience collected from the environment. + + This configuration class contains all the parameters needed to define a SAC agent, + including network architectures, optimization settings, and algorithm-specific + hyperparameters. + """ + + # Mapping of feature types to normalization modes + normalization_mapping: dict[str, NormalizationMode] = field( + default_factory=lambda: { + "VISUAL": NormalizationMode.MEAN_STD, + "STATE": NormalizationMode.MIN_MAX, + "ENV": NormalizationMode.MIN_MAX, + "ACTION": NormalizationMode.MIN_MAX, + } + ) + + # Statistics for normalizing different types of inputs + dataset_stats: dict[str, dict[str, list[float]]] | None = field( + default_factory=lambda: { + OBS_IMAGE: { + "mean": [0.485, 0.456, 0.406], + "std": [0.229, 0.224, 0.225], + }, + OBS_STATE: { + "min": [0.0, 0.0], + "max": [1.0, 1.0], + }, + ACTION: { + "min": [0.0, 0.0, 0.0], + "max": [1.0, 1.0, 1.0], + }, + } + ) + + # Architecture specifics + # Device to run the model on (e.g., "cuda", "cpu") + device: str = "cpu" + # Device to store the model on + storage_device: str = "cpu" + # Name of the vision encoder model (Set to "helper2424/resnet10" for hil serl resnet10) + vision_encoder_name: str | None = None + # Whether to freeze the vision encoder during training + freeze_vision_encoder: bool = True + # Hidden dimension size for the image encoder + image_encoder_hidden_dim: int = 32 + # Whether to use a shared encoder for actor and critic + shared_encoder: bool = True + # Number of discrete actions, eg for gripper actions + num_discrete_actions: int | None = None + # Dimension of the image embedding pooling + image_embedding_pooling_dim: int = 8 + + # Training parameter + # Number of steps for online training + online_steps: int = 1000000 + # Seed for the online environment + online_env_seed: int = 10000 + # Capacity of the online replay buffer + online_buffer_capacity: int = 100000 + # Capacity of the offline replay buffer + offline_buffer_capacity: int = 100000 + # Whether to use asynchronous prefetching for the buffers + async_prefetch: bool = False + # Number of steps before learning starts + online_step_before_learning: int = 100 + # Frequency of policy updates + policy_update_freq: int = 1 + + # SAC algorithm parameters + # Discount factor for the SAC algorithm + discount: float = 0.99 + # Initial temperature value + temperature_init: float = 1.0 + # Number of critics in the ensemble + num_critics: int = 2 + # Number of subsampled critics for training + num_subsample_critics: int | None 
= None + # Learning rate for the critic network + critic_lr: float = 3e-4 + # Learning rate for the actor network + actor_lr: float = 3e-4 + # Learning rate for the temperature parameter + temperature_lr: float = 3e-4 + # Weight for the critic target update + critic_target_update_weight: float = 0.005 + # Update-to-data ratio for the UTD algorithm (If you want enable utd_ratio, you need to set it to >1) + utd_ratio: int = 1 + # Hidden dimension size for the state encoder + state_encoder_hidden_dim: int = 256 + # Dimension of the latent space + latent_dim: int = 256 + # Target entropy for the SAC algorithm + target_entropy: float | None = None + # Whether to use backup entropy for the SAC algorithm + use_backup_entropy: bool = True + # Gradient clipping norm for the SAC algorithm + grad_clip_norm: float = 40.0 + + # Network configuration + # Configuration for the critic network architecture + critic_network_kwargs: CriticNetworkConfig = field(default_factory=CriticNetworkConfig) + # Configuration for the actor network architecture + actor_network_kwargs: ActorNetworkConfig = field(default_factory=ActorNetworkConfig) + # Configuration for the policy parameters + policy_kwargs: PolicyConfig = field(default_factory=PolicyConfig) + # Configuration for the discrete critic network + discrete_critic_network_kwargs: CriticNetworkConfig = field(default_factory=CriticNetworkConfig) + # Configuration for actor-learner architecture + actor_learner_config: ActorLearnerConfig = field(default_factory=ActorLearnerConfig) + # Configuration for concurrency settings (you can use threads or processes for the actor and learner) + concurrency: ConcurrencyConfig = field(default_factory=ConcurrencyConfig) + + # Optimizations + use_torch_compile: bool = True + + def __post_init__(self): + super().__post_init__() + # Any validation specific to SAC configuration + + def get_optimizer_preset(self) -> MultiAdamConfig: + return MultiAdamConfig( + weight_decay=0.0, + optimizer_groups={ + "actor": {"lr": self.actor_lr}, + "critic": {"lr": self.critic_lr}, + "temperature": {"lr": self.temperature_lr}, + }, + ) + + def get_scheduler_preset(self) -> None: + return None + + def validate_features(self) -> None: + has_image = any(is_image_feature(key) for key in self.input_features) + has_state = OBS_STATE in self.input_features + + if not (has_state or has_image): + raise ValueError( + "You must provide either 'observation.state' or an image observation (key starting with 'observation.image') in the input features" + ) + + if "action" not in self.output_features: + raise ValueError("You must provide 'action' in the output features") + + @property + def image_features(self) -> list[str]: + return [key for key in self.input_features if is_image_feature(key)] + + @property + def observation_delta_indices(self) -> list: + return None + + @property + def action_delta_indices(self) -> list: + return None # SAC typically predicts one action at a time + + @property + def reward_delta_indices(self) -> None: + return None diff --git a/lerobot/common/policies/sac/modeling_sac.py b/lerobot/common/policies/sac/modeling_sac.py new file mode 100644 index 0000000000..b588115ea0 --- /dev/null +++ b/lerobot/common/policies/sac/modeling_sac.py @@ -0,0 +1,1111 @@ +#!/usr/bin/env python + +# Copyright 2024 The HuggingFace Inc. team. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
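Because `get_optimizer_preset()` above returns a `MultiAdamConfig`, SAC training ends up with one Adam optimizer per parameter group (actor, critic, temperature, and optionally the discrete critic) instead of a single optimizer. A hedged sketch of how the two pieces fit together follows; the stand-in modules replace `policy.get_optim_params()` and the exact call site in the learner is an assumption, while the group names and `MultiAdamConfig.build()` match the code in this patch.

    # Illustrative sketch only; not the learner implementation.
    import torch
    import torch.nn as nn
    from lerobot.common.optim.optimizers import MultiAdamConfig

    cfg = MultiAdamConfig(
        lr=3e-4,
        optimizer_groups={
            "actor": {"lr": 3e-4},
            "critic": {"lr": 3e-4},
            "temperature": {"lr": 3e-4},
        },
    )

    # Stand-ins for the parameter groups returned by SACPolicy.get_optim_params().
    actor, critic = nn.Linear(32, 6), nn.Linear(38, 1)
    log_alpha = nn.Parameter(torch.zeros(1))
    params = {
        "actor": list(actor.parameters()),
        "critic": list(critic.parameters()),
        "temperature": [log_alpha],
    }

    optimizers = cfg.build(params)  # -> {"actor": Adam, "critic": Adam, "temperature": Adam}
    optimizers["critic"].zero_grad()  # each group is stepped independently during training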
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +from dataclasses import asdict +from typing import Callable, Literal + +import einops +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F # noqa: N812 +from torch import Tensor +from torch.distributions import MultivariateNormal, TanhTransform, Transform, TransformedDistribution + +from lerobot.common.policies.normalize import NormalizeBuffer +from lerobot.common.policies.pretrained import PreTrainedPolicy +from lerobot.common.policies.sac.configuration_sac import SACConfig, is_image_feature +from lerobot.common.policies.utils import get_device_from_parameters + +DISCRETE_DIMENSION_INDEX = -1 # Gripper is always the last dimension + + +class SACPolicy( + PreTrainedPolicy, +): + config_class = SACConfig + name = "sac" + + def __init__( + self, + config: SACConfig | None = None, + dataset_stats: dict[str, dict[str, Tensor]] | None = None, + ): + super().__init__(config) + config.validate_features() + self.config = config + + # Determine action dimension and initialize all components + continuous_action_dim = config.output_features["action"].shape[0] + self._init_normalization(dataset_stats) + self._init_encoders() + self._init_critics(continuous_action_dim) + self._init_actor(continuous_action_dim) + self._init_temperature() + + def get_optim_params(self) -> dict: + optim_params = { + "actor": [ + p + for n, p in self.actor.named_parameters() + if not n.startswith("encoder") or not self.shared_encoder + ], + "critic": self.critic_ensemble.parameters(), + "temperature": self.log_alpha, + } + if self.config.num_discrete_actions is not None: + optim_params["discrete_critic"] = self.discrete_critic.parameters() + return optim_params + + def reset(self): + """Reset the policy""" + pass + + @torch.no_grad() + def select_action(self, batch: dict[str, Tensor]) -> Tensor: + """Select action for inference/evaluation""" + + observations_features = None + if self.shared_encoder and self.actor.encoder.has_images: + # Cache and normalize image features + observations_features = self.actor.encoder.get_cached_image_features(batch, normalize=True) + + actions, _, _ = self.actor(batch, observations_features) + + if self.config.num_discrete_actions is not None: + discrete_action_value = self.discrete_critic(batch, observations_features) + discrete_action = torch.argmax(discrete_action_value, dim=-1, keepdim=True) + actions = torch.cat([actions, discrete_action], dim=-1) + + return actions + + def critic_forward( + self, + observations: dict[str, Tensor], + actions: Tensor, + use_target: bool = False, + observation_features: Tensor | None = None, + ) -> Tensor: + """Forward pass through a critic network ensemble + + Args: + observations: Dictionary of observations + actions: Action tensor + use_target: If True, use target critics, otherwise use ensemble critics + + Returns: + Tensor of Q-values from all critics + """ + + critics = self.critic_target if use_target else self.critic_ensemble + q_values = critics(observations, actions, observation_features) + return q_values + + def discrete_critic_forward( + self, observations, use_target=False, 
observation_features=None + ) -> torch.Tensor: + """Forward pass through a discrete critic network + + Args: + observations: Dictionary of observations + use_target: If True, use target critics, otherwise use ensemble critics + observation_features: Optional pre-computed observation features to avoid recomputing encoder output + + Returns: + Tensor of Q-values from the discrete critic network + """ + discrete_critic = self.discrete_critic_target if use_target else self.discrete_critic + q_values = discrete_critic(observations, observation_features) + return q_values + + def forward( + self, + batch: dict[str, Tensor | dict[str, Tensor]], + model: Literal["actor", "critic", "temperature", "discrete_critic"] = "critic", + ) -> dict[str, Tensor]: + """Compute the loss for the given model + + Args: + batch: Dictionary containing: + - action: Action tensor + - reward: Reward tensor + - state: Observations tensor dict + - next_state: Next observations tensor dict + - done: Done mask tensor + - observation_feature: Optional pre-computed observation features + - next_observation_feature: Optional pre-computed next observation features + model: Which model to compute the loss for ("actor", "critic", "discrete_critic", or "temperature") + + Returns: + The computed loss tensor + """ + # Extract common components from batch + actions: Tensor = batch["action"] + observations: dict[str, Tensor] = batch["state"] + observation_features: Tensor = batch.get("observation_feature") + + if model == "critic": + # Extract critic-specific components + rewards: Tensor = batch["reward"] + next_observations: dict[str, Tensor] = batch["next_state"] + done: Tensor = batch["done"] + next_observation_features: Tensor = batch.get("next_observation_feature") + + loss_critic = self.compute_loss_critic( + observations=observations, + actions=actions, + rewards=rewards, + next_observations=next_observations, + done=done, + observation_features=observation_features, + next_observation_features=next_observation_features, + ) + + return {"loss_critic": loss_critic} + + if model == "discrete_critic" and self.config.num_discrete_actions is not None: + # Extract critic-specific components + rewards: Tensor = batch["reward"] + next_observations: dict[str, Tensor] = batch["next_state"] + done: Tensor = batch["done"] + next_observation_features: Tensor = batch.get("next_observation_feature") + complementary_info = batch.get("complementary_info") + loss_discrete_critic = self.compute_loss_discrete_critic( + observations=observations, + actions=actions, + rewards=rewards, + next_observations=next_observations, + done=done, + observation_features=observation_features, + next_observation_features=next_observation_features, + complementary_info=complementary_info, + ) + return {"loss_discrete_critic": loss_discrete_critic} + if model == "actor": + return { + "loss_actor": self.compute_loss_actor( + observations=observations, + observation_features=observation_features, + ) + } + + if model == "temperature": + return { + "loss_temperature": self.compute_loss_temperature( + observations=observations, + observation_features=observation_features, + ) + } + + raise ValueError(f"Unknown model type: {model}") + + def update_target_networks(self): + """Update target networks with exponential moving average""" + for target_param, param in zip( + self.critic_target.parameters(), + self.critic_ensemble.parameters(), + strict=True, + ): + target_param.data.copy_( + param.data * self.config.critic_target_update_weight + + target_param.data * (1.0 - 
self.config.critic_target_update_weight) + ) + if self.config.num_discrete_actions is not None: + for target_param, param in zip( + self.discrete_critic_target.parameters(), + self.discrete_critic.parameters(), + strict=True, + ): + target_param.data.copy_( + param.data * self.config.critic_target_update_weight + + target_param.data * (1.0 - self.config.critic_target_update_weight) + ) + + def update_temperature(self): + self.temperature = self.log_alpha.exp().item() + + def compute_loss_critic( + self, + observations, + actions, + rewards, + next_observations, + done, + observation_features: Tensor | None = None, + next_observation_features: Tensor | None = None, + ) -> Tensor: + with torch.no_grad(): + next_action_preds, next_log_probs, _ = self.actor(next_observations, next_observation_features) + + # 2- compute q targets + q_targets = self.critic_forward( + observations=next_observations, + actions=next_action_preds, + use_target=True, + observation_features=next_observation_features, + ) + + # Subsample critics to prevent overfitting when using a high UTD (update-to-data) ratio + # TODO: Get indices before forward pass to avoid unnecessary computation + if self.config.num_subsample_critics is not None: + indices = torch.randperm(self.config.num_critics) + indices = indices[: self.config.num_subsample_critics] + q_targets = q_targets[indices] + + # critics subsample size + min_q, _ = q_targets.min(dim=0) # Get values from min operation + if self.config.use_backup_entropy: + min_q = min_q - (self.temperature * next_log_probs) + + td_target = rewards + (1 - done) * self.config.discount * min_q + + # 3- compute predicted qs + if self.config.num_discrete_actions is not None: + # NOTE: We only want to keep the continuous action part + # In the buffer we have the full action space (continuous + discrete) + # We need to split them before concatenating them in the critic forward + actions: Tensor = actions[:, :DISCRETE_DIMENSION_INDEX] + q_preds = self.critic_forward( + observations=observations, + actions=actions, + use_target=False, + observation_features=observation_features, + ) + + # 4- Calculate loss + # Compute state-action value loss (TD loss) for all of the Q functions in the ensemble.
+ td_target_duplicate = einops.repeat(td_target, "b -> e b", e=q_preds.shape[0]) + # You compute the mean loss of the batch for each critic and then to compute the final loss you sum them up + critics_loss = ( + F.mse_loss( + input=q_preds, + target=td_target_duplicate, + reduction="none", + ).mean(dim=1) + ).sum() + return critics_loss + + def compute_loss_discrete_critic( + self, + observations, + actions, + rewards, + next_observations, + done, + observation_features=None, + next_observation_features=None, + complementary_info=None, + ): + # NOTE: We only want to keep the discrete action part + # In the buffer we have the full action space (continuous + discrete) + # We need to split them before concatenating them in the critic forward + actions_discrete: Tensor = actions[:, DISCRETE_DIMENSION_INDEX:].clone() + actions_discrete = torch.round(actions_discrete) + actions_discrete = actions_discrete.long() + + discrete_penalties: Tensor | None = None + if complementary_info is not None: + discrete_penalties: Tensor | None = complementary_info.get("discrete_penalty") + + with torch.no_grad(): + # For DQN, select actions using online network, evaluate with target network + next_discrete_qs = self.discrete_critic_forward( + next_observations, use_target=False, observation_features=next_observation_features + ) + best_next_discrete_action = torch.argmax(next_discrete_qs, dim=-1, keepdim=True) + + # Get target Q-values from target network + target_next_discrete_qs = self.discrete_critic_forward( + observations=next_observations, + use_target=True, + observation_features=next_observation_features, + ) + + # Use gather to select Q-values for best actions + target_next_discrete_q = torch.gather( + target_next_discrete_qs, dim=1, index=best_next_discrete_action + ).squeeze(-1) + + # Compute target Q-value with Bellman equation + rewards_discrete = rewards + if discrete_penalties is not None: + rewards_discrete = rewards + discrete_penalties + target_discrete_q = rewards_discrete + (1 - done) * self.config.discount * target_next_discrete_q + + # Get predicted Q-values for current observations + predicted_discrete_qs = self.discrete_critic_forward( + observations=observations, use_target=False, observation_features=observation_features + ) + + # Use gather to select Q-values for taken actions + predicted_discrete_q = torch.gather(predicted_discrete_qs, dim=1, index=actions_discrete).squeeze(-1) + + # Compute MSE loss between predicted and target Q-values + discrete_critic_loss = F.mse_loss(input=predicted_discrete_q, target=target_discrete_q) + return discrete_critic_loss + + def compute_loss_temperature(self, observations, observation_features: Tensor | None = None) -> Tensor: + """Compute the temperature loss""" + # calculate temperature loss + with torch.no_grad(): + _, log_probs, _ = self.actor(observations, observation_features) + temperature_loss = (-self.log_alpha.exp() * (log_probs + self.target_entropy)).mean() + return temperature_loss + + def compute_loss_actor( + self, + observations, + observation_features: Tensor | None = None, + ) -> Tensor: + actions_pi, log_probs, _ = self.actor(observations, observation_features) + + q_preds = self.critic_forward( + observations=observations, + actions=actions_pi, + use_target=False, + observation_features=observation_features, + ) + min_q_preds = q_preds.min(dim=0)[0] + + actor_loss = ((self.temperature * log_probs) - min_q_preds).mean() + return actor_loss + + def _init_normalization(self, dataset_stats): + """Initialize input/output normalization 
modules.""" + self.normalize_inputs = nn.Identity() + self.normalize_targets = nn.Identity() + if self.config.dataset_stats is not None: + params = _convert_normalization_params_to_tensor(self.config.dataset_stats) + self.normalize_inputs = NormalizeBuffer( + self.config.input_features, self.config.normalization_mapping, params + ) + stats = dataset_stats or params + self.normalize_targets = NormalizeBuffer( + self.config.output_features, self.config.normalization_mapping, stats + ) + + def _init_encoders(self): + """Initialize shared or separate encoders for actor and critic.""" + self.shared_encoder = self.config.shared_encoder + self.encoder_critic = SACObservationEncoder(self.config, self.normalize_inputs) + self.encoder_actor = ( + self.encoder_critic + if self.shared_encoder + else SACObservationEncoder(self.config, self.normalize_inputs) + ) + + def _init_critics(self, continuous_action_dim): + """Build critic ensemble, targets, and optional discrete critic.""" + heads = [ + CriticHead( + input_dim=self.encoder_critic.output_dim + continuous_action_dim, + **asdict(self.config.critic_network_kwargs), + ) + for _ in range(self.config.num_critics) + ] + self.critic_ensemble = CriticEnsemble( + encoder=self.encoder_critic, ensemble=heads, output_normalization=self.normalize_targets + ) + target_heads = [ + CriticHead( + input_dim=self.encoder_critic.output_dim + continuous_action_dim, + **asdict(self.config.critic_network_kwargs), + ) + for _ in range(self.config.num_critics) + ] + self.critic_target = CriticEnsemble( + encoder=self.encoder_critic, ensemble=target_heads, output_normalization=self.normalize_targets + ) + self.critic_target.load_state_dict(self.critic_ensemble.state_dict()) + + if self.config.use_torch_compile: + self.critic_ensemble = torch.compile(self.critic_ensemble) + self.critic_target = torch.compile(self.critic_target) + + if self.config.num_discrete_actions is not None: + self._init_discrete_critics() + + def _init_discrete_critics(self): + """Build discrete discrete critic ensemble and target networks.""" + self.discrete_critic = DiscreteCritic( + encoder=self.encoder_critic, + input_dim=self.encoder_critic.output_dim, + output_dim=self.config.num_discrete_actions, + **asdict(self.config.discrete_critic_network_kwargs), + ) + self.discrete_critic_target = DiscreteCritic( + encoder=self.encoder_critic, + input_dim=self.encoder_critic.output_dim, + output_dim=self.config.num_discrete_actions, + **asdict(self.config.discrete_critic_network_kwargs), + ) + + # TODO: (maractingi, azouitine) Compile the discrete critic + self.discrete_critic_target.load_state_dict(self.discrete_critic.state_dict()) + + def _init_actor(self, continuous_action_dim): + """Initialize policy actor network and default target entropy.""" + # NOTE: The actor select only the continuous action part + self.actor = Policy( + encoder=self.encoder_actor, + network=MLP(input_dim=self.encoder_actor.output_dim, **asdict(self.config.actor_network_kwargs)), + action_dim=continuous_action_dim, + encoder_is_shared=self.shared_encoder, + **asdict(self.config.policy_kwargs), + ) + + self.target_entropy = self.config.target_entropy + if self.target_entropy is None: + dim = continuous_action_dim + (1 if self.config.num_discrete_actions is not None else 0) + self.target_entropy = -np.prod(dim) / 2 + + def _init_temperature(self): + """Set up temperature parameter and initial log_alpha.""" + temp_init = self.config.temperature_init + self.log_alpha = nn.Parameter(torch.tensor([math.log(temp_init)])) + 
self.temperature = self.log_alpha.exp().item() + + +class SACObservationEncoder(nn.Module): + """Encode image and/or state vector observations.""" + + def __init__(self, config: SACConfig, input_normalizer: nn.Module) -> None: + super().__init__() + self.config = config + self.input_normalization = input_normalizer + self._init_image_layers() + self._init_state_layers() + self._compute_output_dim() + + def _init_image_layers(self) -> None: + self.image_keys = [k for k in self.config.input_features if is_image_feature(k)] + self.has_images = bool(self.image_keys) + if not self.has_images: + return + + if self.config.vision_encoder_name is not None: + self.image_encoder = PretrainedImageEncoder(self.config) + else: + self.image_encoder = DefaultImageEncoder(self.config) + + if self.config.freeze_vision_encoder: + freeze_image_encoder(self.image_encoder) + + dummy = torch.zeros(1, *self.config.input_features[self.image_keys[0]].shape) + with torch.no_grad(): + _, channels, height, width = self.image_encoder(dummy).shape + + self.spatial_embeddings = nn.ModuleDict() + self.post_encoders = nn.ModuleDict() + + for key in self.image_keys: + name = key.replace(".", "_") + self.spatial_embeddings[name] = SpatialLearnedEmbeddings( + height=height, + width=width, + channel=channels, + num_features=self.config.image_embedding_pooling_dim, + ) + self.post_encoders[name] = nn.Sequential( + nn.Dropout(0.1), + nn.Linear( + in_features=channels * self.config.image_embedding_pooling_dim, + out_features=self.config.latent_dim, + ), + nn.LayerNorm(normalized_shape=self.config.latent_dim), + nn.Tanh(), + ) + + def _init_state_layers(self) -> None: + self.has_env = "observation.environment_state" in self.config.input_features + self.has_state = "observation.state" in self.config.input_features + if self.has_env: + dim = self.config.input_features["observation.environment_state"].shape[0] + self.env_encoder = nn.Sequential( + nn.Linear(dim, self.config.latent_dim), + nn.LayerNorm(self.config.latent_dim), + nn.Tanh(), + ) + if self.has_state: + dim = self.config.input_features["observation.state"].shape[0] + self.state_encoder = nn.Sequential( + nn.Linear(dim, self.config.latent_dim), + nn.LayerNorm(self.config.latent_dim), + nn.Tanh(), + ) + + def _compute_output_dim(self) -> None: + out = 0 + if self.has_images: + out += len(self.image_keys) * self.config.latent_dim + if self.has_env: + out += self.config.latent_dim + if self.has_state: + out += self.config.latent_dim + self._out_dim = out + + def forward( + self, obs: dict[str, Tensor], cache: dict[str, Tensor] | None = None, detach: bool = False + ) -> Tensor: + obs = self.input_normalization(obs) + parts = [] + if self.has_images: + if cache is None: + cache = self.get_cached_image_features(obs, normalize=False) + parts.append(self._encode_images(cache, detach)) + if self.has_env: + parts.append(self.env_encoder(obs["observation.environment_state"])) + if self.has_state: + parts.append(self.state_encoder(obs["observation.state"])) + if parts: + return torch.cat(parts, dim=-1) + + raise ValueError( + "No parts to concatenate, you should have at least one image or environment state or state" + ) + + def get_cached_image_features(self, obs: dict[str, Tensor], normalize: bool = False) -> dict[str, Tensor]: + """Extract and optionally cache image features from observations. + + This function processes image observations through the vision encoder once and returns + the resulting features. 
+ When the image encoder is shared between actor and critics AND frozen, these features can be safely cached and + reused across policy components (actor, critic, discrete_critic), avoiding redundant forward passes. + + Performance impact: + - The vision encoder forward pass is typically the main computational bottleneck during training and inference + - Caching these features can provide 2-4x speedup in training and inference + + Normalization behavior: + - When called from inside forward(): set normalize=False since inputs are already normalized + - When called from outside forward(): set normalize=True to ensure proper input normalization + + Usage patterns: + - Called in select_action() with normalize=True + - Called in learner.py's get_observation_features() to pre-compute features for all policy components + - Called internally by forward() with normalize=False + + Args: + obs: Dictionary of observation tensors containing image keys + normalize: Whether to normalize observations before encoding + Set to True when calling directly from outside the encoder's forward method + Set to False when calling from within forward() where inputs are already normalized + + Returns: + Dictionary mapping image keys to their corresponding encoded features + """ + if normalize: + obs = self.input_normalization(obs) + batched = torch.cat([obs[k] for k in self.image_keys], dim=0) + out = self.image_encoder(batched) + chunks = torch.chunk(out, len(self.image_keys), dim=0) + return dict(zip(self.image_keys, chunks, strict=False)) + + def _encode_images(self, cache: dict[str, Tensor], detach: bool) -> Tensor: + """Encode image features from cached observations. + + This function takes pre-encoded image features from the cache and applies spatial embeddings and post-encoders. + It also supports detaching the encoded features if specified. + + Args: + cache (dict[str, Tensor]): The cached image features. + detach (bool): Usually when the encoder is shared between actor and critics, + we want to detach the encoded features on the policy side to avoid backprop through the encoder. + More detail here `https://cdn.aaai.org/ojs/17276/17276-13-20770-1-2-20210518.pdf` + + Returns: + Tensor: The encoded image features. + """ + feats = [] + for k, feat in cache.items(): + safe_key = k.replace(".", "_") + x = self.spatial_embeddings[safe_key](feat) + x = self.post_encoders[safe_key](x) + if detach: + x = x.detach() + feats.append(x) + return torch.cat(feats, dim=-1) + + @property + def output_dim(self) -> int: + return self._out_dim + + +class MLP(nn.Module): + """Multi-layer perceptron builder. + + Dynamically constructs a sequence of layers based on `hidden_dims`: + 1) Linear (in_dim -> out_dim) + 2) Optional Dropout if `dropout_rate` > 0 and (not final layer or `activate_final`) + 3) LayerNorm on the output features + 4) Activation (standard for intermediate layers, `final_activation` for last layer if `activate_final`) + + Arguments: + input_dim (int): Size of input feature dimension. + hidden_dims (list[int]): Sizes for each hidden layer. + activations (Callable or str): Activation to apply between layers. + activate_final (bool): Whether to apply activation at the final layer. + dropout_rate (Optional[float]): Dropout probability applied before normalization and activation. + final_activation (Optional[Callable or str]): Activation for the final layer when `activate_final` is True. + + For each layer, `in_dim` is updated to the previous `out_dim`. 
All constructed modules are + stored in `self.net` as an `nn.Sequential` container. + """ + + def __init__( + self, + input_dim: int, + hidden_dims: list[int], + activations: Callable[[torch.Tensor], torch.Tensor] | str = nn.SiLU(), + activate_final: bool = False, + dropout_rate: float | None = None, + final_activation: Callable[[torch.Tensor], torch.Tensor] | str | None = None, + ): + super().__init__() + layers: list[nn.Module] = [] + in_dim = input_dim + total = len(hidden_dims) + + for idx, out_dim in enumerate(hidden_dims): + # 1) linear transform + layers.append(nn.Linear(in_dim, out_dim)) + + is_last = idx == total - 1 + # 2-4) optionally add dropout, normalization, and activation + if not is_last or activate_final: + if dropout_rate and dropout_rate > 0: + layers.append(nn.Dropout(p=dropout_rate)) + layers.append(nn.LayerNorm(out_dim)) + act_cls = final_activation if is_last and final_activation else activations + act = act_cls if isinstance(act_cls, nn.Module) else getattr(nn, act_cls)() + layers.append(act) + + in_dim = out_dim + + self.net = nn.Sequential(*layers) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return self.net(x) + + +class CriticHead(nn.Module): + def __init__( + self, + input_dim: int, + hidden_dims: list[int], + activations: Callable[[torch.Tensor], torch.Tensor] | str = nn.SiLU(), + activate_final: bool = False, + dropout_rate: float | None = None, + init_final: float | None = None, + final_activation: Callable[[torch.Tensor], torch.Tensor] | str | None = None, + ): + super().__init__() + self.net = MLP( + input_dim=input_dim, + hidden_dims=hidden_dims, + activations=activations, + activate_final=activate_final, + dropout_rate=dropout_rate, + final_activation=final_activation, + ) + self.output_layer = nn.Linear(in_features=hidden_dims[-1], out_features=1) + if init_final is not None: + nn.init.uniform_(self.output_layer.weight, -init_final, init_final) + nn.init.uniform_(self.output_layer.bias, -init_final, init_final) + else: + orthogonal_init()(self.output_layer.weight) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return self.output_layer(self.net(x)) + + +class CriticEnsemble(nn.Module): + """ + CriticEnsemble wraps multiple CriticHead modules into an ensemble. + + Args: + encoder (SACObservationEncoder): encoder for observations. + ensemble (List[CriticHead]): list of critic heads. + output_normalization (nn.Module): normalization layer for actions. + init_final (float | None): optional initializer scale for final layers. + + Forward returns a tensor of shape (num_critics, batch_size) containing Q-values. 
+ """ + + def __init__( + self, + encoder: SACObservationEncoder, + ensemble: list[CriticHead], + output_normalization: nn.Module, + init_final: float | None = None, + ): + super().__init__() + self.encoder = encoder + self.init_final = init_final + self.output_normalization = output_normalization + self.critics = nn.ModuleList(ensemble) + + def forward( + self, + observations: dict[str, torch.Tensor], + actions: torch.Tensor, + observation_features: torch.Tensor | None = None, + ) -> torch.Tensor: + device = get_device_from_parameters(self) + # Move each tensor in observations to device + observations = {k: v.to(device) for k, v in observations.items()} + # NOTE: We normalize actions it helps for sample efficiency + actions: dict[str, torch.tensor] = {"action": actions} + # NOTE: Normalization layer took dict in input and outputs a dict that why + actions = self.output_normalization(actions)["action"] + actions = actions.to(device) + + obs_enc = self.encoder(observations, cache=observation_features) + + inputs = torch.cat([obs_enc, actions], dim=-1) + + # Loop through critics and collect outputs + q_values = [] + for critic in self.critics: + q_values.append(critic(inputs)) + + # Stack outputs to match expected shape [num_critics, batch_size] + q_values = torch.stack([q.squeeze(-1) for q in q_values], dim=0) + return q_values + + +class DiscreteCritic(nn.Module): + def __init__( + self, + encoder: nn.Module, + input_dim: int, + hidden_dims: list[int], + output_dim: int = 3, + activations: Callable[[torch.Tensor], torch.Tensor] | str = nn.SiLU(), + activate_final: bool = False, + dropout_rate: float | None = None, + init_final: float | None = None, + final_activation: Callable[[torch.Tensor], torch.Tensor] | str | None = None, + ): + super().__init__() + self.encoder = encoder + self.output_dim = output_dim + + self.net = MLP( + input_dim=input_dim, + hidden_dims=hidden_dims, + activations=activations, + activate_final=activate_final, + dropout_rate=dropout_rate, + final_activation=final_activation, + ) + + self.output_layer = nn.Linear(in_features=hidden_dims[-1], out_features=self.output_dim) + if init_final is not None: + nn.init.uniform_(self.output_layer.weight, -init_final, init_final) + nn.init.uniform_(self.output_layer.bias, -init_final, init_final) + else: + orthogonal_init()(self.output_layer.weight) + + def forward( + self, observations: torch.Tensor, observation_features: torch.Tensor | None = None + ) -> torch.Tensor: + device = get_device_from_parameters(self) + observations = {k: v.to(device) for k, v in observations.items()} + obs_enc = self.encoder(observations, cache=observation_features) + return self.output_layer(self.net(obs_enc)) + + +class Policy(nn.Module): + def __init__( + self, + encoder: SACObservationEncoder, + network: nn.Module, + action_dim: int, + std_min: float = -5, + std_max: float = 2, + fixed_std: torch.Tensor | None = None, + init_final: float | None = None, + use_tanh_squash: bool = False, + encoder_is_shared: bool = False, + ): + super().__init__() + self.encoder: SACObservationEncoder = encoder + self.network = network + self.action_dim = action_dim + self.std_min = std_min + self.std_max = std_max + self.fixed_std = fixed_std + self.use_tanh_squash = use_tanh_squash + self.encoder_is_shared = encoder_is_shared + + # Find the last Linear layer's output dimension + for layer in reversed(network.net): + if isinstance(layer, nn.Linear): + out_features = layer.out_features + break + # Mean layer + self.mean_layer = nn.Linear(out_features, action_dim) 
+ if init_final is not None: + nn.init.uniform_(self.mean_layer.weight, -init_final, init_final) + nn.init.uniform_(self.mean_layer.bias, -init_final, init_final) + else: + orthogonal_init()(self.mean_layer.weight) + + # Standard deviation layer or parameter + if fixed_std is None: + self.std_layer = nn.Linear(out_features, action_dim) + if init_final is not None: + nn.init.uniform_(self.std_layer.weight, -init_final, init_final) + nn.init.uniform_(self.std_layer.bias, -init_final, init_final) + else: + orthogonal_init()(self.std_layer.weight) + + def forward( + self, + observations: torch.Tensor, + observation_features: torch.Tensor | None = None, + ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + # We detach the encoder if it is shared to avoid backprop through it + # This is important to avoid the encoder to be updated through the policy + obs_enc = self.encoder(observations, cache=observation_features, detach=self.encoder_is_shared) + + # Get network outputs + outputs = self.network(obs_enc) + means = self.mean_layer(outputs) + + # Compute standard deviations + if self.fixed_std is None: + log_std = self.std_layer(outputs) + std = torch.exp(log_std) # Match JAX "exp" + std = torch.clamp(std, self.std_min, self.std_max) # Match JAX default clip + else: + std = self.fixed_std.expand_as(means) + + # Build transformed distribution + dist = TanhMultivariateNormalDiag(loc=means, scale_diag=std) + + # Sample actions (reparameterized) + actions = dist.rsample() + + # Compute log_probs + log_probs = dist.log_prob(actions) + + return actions, log_probs, means + + def get_features(self, observations: torch.Tensor) -> torch.Tensor: + """Get encoded features from observations""" + device = get_device_from_parameters(self) + observations = observations.to(device) + if self.encoder is not None: + with torch.inference_mode(): + return self.encoder(observations) + return observations + + +class DefaultImageEncoder(nn.Module): + def __init__(self, config: SACConfig): + super().__init__() + image_key = next(key for key in config.input_features if is_image_feature(key)) + self.image_enc_layers = nn.Sequential( + nn.Conv2d( + in_channels=config.input_features[image_key].shape[0], + out_channels=config.image_encoder_hidden_dim, + kernel_size=7, + stride=2, + ), + nn.ReLU(), + nn.Conv2d( + in_channels=config.image_encoder_hidden_dim, + out_channels=config.image_encoder_hidden_dim, + kernel_size=5, + stride=2, + ), + nn.ReLU(), + nn.Conv2d( + in_channels=config.image_encoder_hidden_dim, + out_channels=config.image_encoder_hidden_dim, + kernel_size=3, + stride=2, + ), + nn.ReLU(), + nn.Conv2d( + in_channels=config.image_encoder_hidden_dim, + out_channels=config.image_encoder_hidden_dim, + kernel_size=3, + stride=2, + ), + nn.ReLU(), + ) + + def forward(self, x): + x = self.image_enc_layers(x) + return x + + +def freeze_image_encoder(image_encoder: nn.Module): + """Freeze all parameters in the encoder""" + for param in image_encoder.parameters(): + param.requires_grad = False + + +class PretrainedImageEncoder(nn.Module): + def __init__(self, config: SACConfig): + super().__init__() + + self.image_enc_layers, self.image_enc_out_shape = self._load_pretrained_vision_encoder(config) + + def _load_pretrained_vision_encoder(self, config: SACConfig): + """Set up CNN encoder""" + from transformers import AutoModel + + self.image_enc_layers = AutoModel.from_pretrained(config.vision_encoder_name, trust_remote_code=True) + + if hasattr(self.image_enc_layers.config, "hidden_sizes"): + self.image_enc_out_shape = 
self.image_enc_layers.config.hidden_sizes[-1] # Last channel dimension + elif hasattr(self.image_enc_layers, "fc"): + self.image_enc_out_shape = self.image_enc_layers.fc.in_features + else: + raise ValueError("Unsupported vision encoder architecture, make sure you are using a CNN") + return self.image_enc_layers, self.image_enc_out_shape + + def forward(self, x): + enc_feat = self.image_enc_layers(x).last_hidden_state + return enc_feat + + +def orthogonal_init(): + return lambda x: torch.nn.init.orthogonal_(x, gain=1.0) + + +class SpatialLearnedEmbeddings(nn.Module): + def __init__(self, height, width, channel, num_features=8): + """ + PyTorch implementation of learned spatial embeddings + + Args: + height: Spatial height of input features + width: Spatial width of input features + channel: Number of input channels + num_features: Number of output embedding dimensions + """ + super().__init__() + self.height = height + self.width = width + self.channel = channel + self.num_features = num_features + + self.kernel = nn.Parameter(torch.empty(channel, height, width, num_features)) + + nn.init.kaiming_normal_(self.kernel, mode="fan_in", nonlinearity="linear") + + def forward(self, features): + """ + Forward pass for spatial embedding + + Args: + features: Input tensor of shape [B, C, H, W] where B is batch size, + C is number of channels, H is height, and W is width + Returns: + Output tensor of shape [B, C*F] where F is the number of features + """ + + features_expanded = features.unsqueeze(-1) # [B, C, H, W, 1] + kernel_expanded = self.kernel.unsqueeze(0) # [1, C, H, W, F] + + # Element-wise multiplication and spatial reduction + output = (features_expanded * kernel_expanded).sum(dim=(2, 3)) # Sum over H,W dimensions + + # Reshape to combine channel and feature dimensions + output = output.view(output.size(0), -1) # [B, C*F] + + return output + + +class RescaleFromTanh(Transform): + def __init__(self, low: float = -1, high: float = 1): + super().__init__() + + self.low = low + + self.high = high + + def _call(self, x): + # Rescale from (-1, 1) to (low, high) + + return 0.5 * (x + 1.0) * (self.high - self.low) + self.low + + def _inverse(self, y): + # Rescale from (low, high) back to (-1, 1) + + return 2.0 * (y - self.low) / (self.high - self.low) - 1.0 + + def log_abs_det_jacobian(self, x, y): + # log|d(rescale)/dx| = sum(log(0.5 * (high - low))) + + scale = 0.5 * (self.high - self.low) + + return torch.sum(torch.log(scale), dim=-1) + + +class TanhMultivariateNormalDiag(TransformedDistribution): + def __init__(self, loc, scale_diag, low=None, high=None): + base_dist = MultivariateNormal(loc, torch.diag_embed(scale_diag)) + + transforms = [TanhTransform(cache_size=1)] + + if low is not None and high is not None: + low = torch.as_tensor(low) + + high = torch.as_tensor(high) + + transforms.insert(0, RescaleFromTanh(low, high)) + + super().__init__(base_dist, transforms) + + def mode(self): + # Mode is mean of base distribution, passed through transforms + + x = self.base_dist.mean + + for transform in self.transforms: + x = transform(x) + + return x + + def stddev(self): + std = self.base_dist.stddev + + x = std + + for transform in self.transforms: + x = transform(x) + + return x + + +def _convert_normalization_params_to_tensor(normalization_params: dict) -> dict: + converted_params = {} + for outer_key, inner_dict in normalization_params.items(): + converted_params[outer_key] = {} + for key, value in inner_dict.items(): + converted_params[outer_key][key] = torch.tensor(value) + if "image" in 
outer_key: + converted_params[outer_key][key] = converted_params[outer_key][key].view(3, 1, 1) + + return converted_params diff --git a/lerobot/common/policies/sac/reward_model/configuration_classifier.py b/lerobot/common/policies/sac/reward_model/configuration_classifier.py new file mode 100644 index 0000000000..6e2a551d4d --- /dev/null +++ b/lerobot/common/policies/sac/reward_model/configuration_classifier.py @@ -0,0 +1,76 @@ +# !/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from dataclasses import dataclass, field + +from lerobot.common.optim.optimizers import AdamWConfig, OptimizerConfig +from lerobot.common.optim.schedulers import LRSchedulerConfig +from lerobot.configs.policies import PreTrainedConfig +from lerobot.configs.types import NormalizationMode + + +@PreTrainedConfig.register_subclass(name="reward_classifier") +@dataclass +class RewardClassifierConfig(PreTrainedConfig): + """Configuration for the Reward Classifier model.""" + + name: str = "reward_classifier" + num_classes: int = 2 + hidden_dim: int = 256 + latent_dim: int = 256 + image_embedding_pooling_dim: int = 8 + dropout_rate: float = 0.1 + model_name: str = "helper2424/resnet10" + device: str = "cpu" + model_type: str = "cnn" # "transformer" or "cnn" + num_cameras: int = 2 + learning_rate: float = 1e-4 + weight_decay: float = 0.01 + grad_clip_norm: float = 1.0 + normalization_mapping: dict[str, NormalizationMode] = field( + default_factory=lambda: { + "VISUAL": NormalizationMode.MEAN_STD, + } + ) + + @property + def observation_delta_indices(self) -> list | None: + return None + + @property + def action_delta_indices(self) -> list | None: + return None + + @property + def reward_delta_indices(self) -> list | None: + return None + + def get_optimizer_preset(self) -> OptimizerConfig: + return AdamWConfig( + lr=self.learning_rate, + weight_decay=self.weight_decay, + grad_clip_norm=self.grad_clip_norm, + ) + + def get_scheduler_preset(self) -> LRSchedulerConfig | None: + return None + + def validate_features(self) -> None: + """Validate feature configurations.""" + has_image = any(key.startswith("observation.image") for key in self.input_features) + if not has_image: + raise ValueError( + "You must provide an image observation (key starting with 'observation.image') in the input features" + ) diff --git a/lerobot/common/policies/sac/reward_model/modeling_classifier.py b/lerobot/common/policies/sac/reward_model/modeling_classifier.py new file mode 100644 index 0000000000..f537e3aefd --- /dev/null +++ b/lerobot/common/policies/sac/reward_model/modeling_classifier.py @@ -0,0 +1,316 @@ +# !/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging + +import torch +from torch import Tensor, nn + +from lerobot.common.constants import OBS_IMAGE, REWARD +from lerobot.common.policies.normalize import Normalize, Unnormalize +from lerobot.common.policies.pretrained import PreTrainedPolicy +from lerobot.common.policies.sac.reward_model.configuration_classifier import RewardClassifierConfig + + +class ClassifierOutput: + """Wrapper for classifier outputs with additional metadata.""" + + def __init__( + self, + logits: Tensor, + probabilities: Tensor | None = None, + hidden_states: Tensor | None = None, + ): + self.logits = logits + self.probabilities = probabilities + self.hidden_states = hidden_states + + def __repr__(self): + return ( + f"ClassifierOutput(logits={self.logits}, " + f"probabilities={self.probabilities}, " + f"hidden_states={self.hidden_states})" + ) + + +class SpatialLearnedEmbeddings(nn.Module): + def __init__(self, height, width, channel, num_features=8): + """ + PyTorch implementation of learned spatial embeddings + + Args: + height: Spatial height of input features + width: Spatial width of input features + channel: Number of input channels + num_features: Number of output embedding dimensions + """ + super().__init__() + self.height = height + self.width = width + self.channel = channel + self.num_features = num_features + + self.kernel = nn.Parameter(torch.empty(channel, height, width, num_features)) + + nn.init.kaiming_normal_(self.kernel, mode="fan_in", nonlinearity="linear") + + def forward(self, features): + """ + Forward pass for spatial embedding + + Args: + features: Input tensor of shape [B, H, W, C] or [H, W, C] if no batch + Returns: + Output tensor of shape [B, C*F] or [C*F] if no batch + """ + + features = features.last_hidden_state + + original_shape = features.shape + if features.dim() == 3: + features = features.unsqueeze(0) # Add batch dim + + features_expanded = features.unsqueeze(-1) # [B, H, W, C, 1] + kernel_expanded = self.kernel.unsqueeze(0) # [1, H, W, C, F] + + # Element-wise multiplication and spatial reduction + output = (features_expanded * kernel_expanded).sum(dim=(2, 3)) # Sum H,W + + # Reshape to combine channel and feature dimensions + output = output.view(output.size(0), -1) # [B, C*F] + + # Remove batch dim + if len(original_shape) == 3: + output = output.squeeze(0) + + return output + + +class Classifier(PreTrainedPolicy): + """Image classifier built on top of a pre-trained encoder.""" + + name = "reward_classifier" + config_class = RewardClassifierConfig + + def __init__( + self, + config: RewardClassifierConfig, + dataset_stats: dict[str, dict[str, Tensor]] | None = None, + ): + from transformers import AutoModel + + super().__init__(config) + self.config = config + + # Initialize normalization (standardized with the policy framework) + self.normalize_inputs = Normalize(config.input_features, config.normalization_mapping, dataset_stats) + self.normalize_targets = Normalize( + config.output_features, config.normalization_mapping, dataset_stats + ) + self.unnormalize_outputs = Unnormalize( + config.output_features, config.normalization_mapping, dataset_stats + 
) + + # Set up encoder + encoder = AutoModel.from_pretrained(self.config.model_name, trust_remote_code=True) + # Extract vision model if we're given a multimodal model + if hasattr(encoder, "vision_model"): + logging.info("Multimodal model detected - using vision encoder only") + self.encoder = encoder.vision_model + self.vision_config = encoder.config.vision_config + else: + self.encoder = encoder + self.vision_config = getattr(encoder, "config", None) + + # Model type from config + self.is_cnn = self.config.model_type == "cnn" + + # For CNNs, initialize backbone + if self.is_cnn: + self._setup_cnn_backbone() + + self._freeze_encoder() + + # Extract image keys from input_features + self.image_keys = [ + key.replace(".", "_") for key in config.input_features if key.startswith(OBS_IMAGE) + ] + + if self.is_cnn: + self.encoders = nn.ModuleDict() + for image_key in self.image_keys: + encoder = self._create_single_encoder() + self.encoders[image_key] = encoder + + self._build_classifier_head() + + def _setup_cnn_backbone(self): + """Set up CNN encoder""" + if hasattr(self.encoder, "fc"): + self.feature_dim = self.encoder.fc.in_features + self.encoder = nn.Sequential(*list(self.encoder.children())[:-1]) + elif hasattr(self.encoder.config, "hidden_sizes"): + self.feature_dim = self.encoder.config.hidden_sizes[-1] # Last channel dimension + else: + raise ValueError("Unsupported CNN architecture") + + def _freeze_encoder(self) -> None: + """Freeze the encoder parameters.""" + for param in self.encoder.parameters(): + param.requires_grad = False + + def _create_single_encoder(self): + encoder = nn.Sequential( + self.encoder, + SpatialLearnedEmbeddings( + height=4, + width=4, + channel=self.feature_dim, + num_features=self.config.image_embedding_pooling_dim, + ), + nn.Dropout(self.config.dropout_rate), + nn.Linear(self.feature_dim * self.config.image_embedding_pooling_dim, self.config.latent_dim), + nn.LayerNorm(self.config.latent_dim), + nn.Tanh(), + ) + + return encoder + + def _build_classifier_head(self) -> None: + """Initialize the classifier head architecture.""" + # Get input dimension based on model type + if self.is_cnn: + input_dim = self.config.latent_dim + else: # Transformer models + if hasattr(self.encoder.config, "hidden_size"): + input_dim = self.encoder.config.hidden_size + else: + raise ValueError("Unsupported transformer architecture since hidden_size is not found") + + self.classifier_head = nn.Sequential( + nn.Linear(input_dim * self.config.num_cameras, self.config.hidden_dim), + nn.Dropout(self.config.dropout_rate), + nn.LayerNorm(self.config.hidden_dim), + nn.ReLU(), + nn.Linear( + self.config.hidden_dim, + 1 if self.config.num_classes == 2 else self.config.num_classes, + ), + ) + + def _get_encoder_output(self, x: torch.Tensor, image_key: str) -> torch.Tensor: + """Extract the appropriate output from the encoder.""" + with torch.no_grad(): + if self.is_cnn: + # The HF ResNet applies pooling internally + outputs = self.encoders[image_key](x) + return outputs + else: # Transformer models + outputs = self.encoder(x) + return outputs.last_hidden_state[:, 0, :] + + def extract_images_and_labels(self, batch: dict[str, Tensor]) -> tuple[list, Tensor]: + """Extract image tensors and label tensors from batch.""" + # Check for both OBS_IMAGE and OBS_IMAGES prefixes + images = [batch[key] for key in self.config.input_features if key.startswith(OBS_IMAGE)] + labels = batch[REWARD] + + return images, labels + + def predict(self, xs: list) -> ClassifierOutput: + """Forward pass of the 
classifier for inference.""" + encoder_outputs = torch.hstack( + [self._get_encoder_output(x, img_key) for x, img_key in zip(xs, self.image_keys, strict=True)] + ) + logits = self.classifier_head(encoder_outputs) + + if self.config.num_classes == 2: + logits = logits.squeeze(-1) + probabilities = torch.sigmoid(logits) + else: + probabilities = torch.softmax(logits, dim=-1) + + return ClassifierOutput(logits=logits, probabilities=probabilities, hidden_states=encoder_outputs) + + def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, dict[str, Tensor]]: + """Standard forward pass for training compatible with train.py.""" + # Normalize inputs if needed + batch = self.normalize_inputs(batch) + batch = self.normalize_targets(batch) + + # Extract images and labels + images, labels = self.extract_images_and_labels(batch) + + # Get predictions + outputs = self.predict(images) + + # Calculate loss + if self.config.num_classes == 2: + # Binary classification + loss = nn.functional.binary_cross_entropy_with_logits(outputs.logits, labels) + predictions = (torch.sigmoid(outputs.logits) > 0.5).float() + else: + # Multi-class classification + loss = nn.functional.cross_entropy(outputs.logits, labels.long()) + predictions = torch.argmax(outputs.logits, dim=1) + + # Calculate accuracy for logging + correct = (predictions == labels).sum().item() + total = labels.size(0) + accuracy = 100 * correct / total + + # Return loss and metrics for logging + output_dict = { + "accuracy": accuracy, + "correct": correct, + "total": total, + } + + return loss, output_dict + + def predict_reward(self, batch, threshold=0.5): + """Eval method. Returns predicted reward with the decision threshold as argument.""" + # Check for both OBS_IMAGE and OBS_IMAGES prefixes + batch = self.normalize_inputs(batch) + batch = self.normalize_targets(batch) + + # Extract images from batch dict + images = [batch[key] for key in self.config.input_features if key.startswith(OBS_IMAGE)] + + if self.config.num_classes == 2: + probs = self.predict(images).probabilities + logging.debug(f"Predicted reward images: {probs}") + return (probs > threshold).float() + else: + return torch.argmax(self.predict(images).probabilities, dim=1) + + def get_optim_params(self): + """Return optimizer parameters for the policy.""" + return self.parameters() + + def select_action(self, batch: dict[str, Tensor]) -> Tensor: + """ + This method is required by PreTrainedPolicy but not used for reward classifiers. + The reward classifier is not an actor and does not select actions. + """ + raise NotImplementedError("Reward classifiers do not select actions") + + def reset(self): + """ + This method is required by PreTrainedPolicy but not used for reward classifiers. + The reward classifier is not an actor and does not select actions. 
+ """ + pass diff --git a/lerobot/common/robots/so100_follower/__init__.py b/lerobot/common/robots/so100_follower/__init__.py index 087fd64562..63c3e1c17a 100644 --- a/lerobot/common/robots/so100_follower/__init__.py +++ b/lerobot/common/robots/so100_follower/__init__.py @@ -1,2 +1,3 @@ -from .config_so100_follower import SO100FollowerConfig +from .config_so100_follower import SO100FollowerConfig, SO100FollowerEndEffectorConfig from .so100_follower import SO100Follower +from .so100_follower_end_effector import SO100FollowerEndEffector diff --git a/lerobot/common/robots/so100_follower/config_so100_follower.py b/lerobot/common/robots/so100_follower/config_so100_follower.py index 2a5a966ee2..b76675d26a 100644 --- a/lerobot/common/robots/so100_follower/config_so100_follower.py +++ b/lerobot/common/robots/so100_follower/config_so100_follower.py @@ -37,3 +37,27 @@ class SO100FollowerConfig(RobotConfig): # Set to `True` for backward compatibility with previous policies/dataset use_degrees: bool = False + + +@RobotConfig.register_subclass("so100_follower_end_effector") +@dataclass +class SO100FollowerEndEffectorConfig(SO100FollowerConfig): + """Configuration for the SO100FollowerEndEffector robot.""" + + # Default bounds for the end-effector position (in meters) + end_effector_bounds: dict[str, list[float]] = field( + default_factory=lambda: { + "min": [-1.0, -1.0, -1.0], # min x, y, z + "max": [1.0, 1.0, 1.0], # max x, y, z + } + ) + + max_gripper_pos: float = 50 + + end_effector_step_sizes: dict[str, float] = field( + default_factory=lambda: { + "x": 0.02, + "y": 0.02, + "z": 0.02, + } + ) diff --git a/lerobot/common/robots/so100_follower/so100_follower_end_effector.py b/lerobot/common/robots/so100_follower/so100_follower_end_effector.py new file mode 100644 index 0000000000..82e89305b3 --- /dev/null +++ b/lerobot/common/robots/so100_follower/so100_follower_end_effector.py @@ -0,0 +1,193 @@ +# !/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import time +from typing import Any + +import numpy as np + +from lerobot.common.cameras import make_cameras_from_configs +from lerobot.common.errors import DeviceNotConnectedError +from lerobot.common.model.kinematics import RobotKinematics +from lerobot.common.motors import Motor, MotorNormMode +from lerobot.common.motors.feetech import FeetechMotorsBus + +from . import SO100Follower +from .config_so100_follower import SO100FollowerEndEffectorConfig + +logger = logging.getLogger(__name__) +EE_FRAME = "gripper_tip" + + +class SO100FollowerEndEffector(SO100Follower): + """ + SO100Follower robot with end-effector space control. + + This robot inherits from SO100Follower but transforms actions from + end-effector space to joint space before sending them to the motors. 
+ """ + + config_class = SO100FollowerEndEffectorConfig + name = "so100_follower_end_effector" + + def __init__(self, config: SO100FollowerEndEffectorConfig): + super().__init__(config) + self.bus = FeetechMotorsBus( + port=self.config.port, + motors={ + "shoulder_pan": Motor(1, "sts3215", MotorNormMode.DEGREES), + "shoulder_lift": Motor(2, "sts3215", MotorNormMode.DEGREES), + "elbow_flex": Motor(3, "sts3215", MotorNormMode.DEGREES), + "wrist_flex": Motor(4, "sts3215", MotorNormMode.DEGREES), + "wrist_roll": Motor(5, "sts3215", MotorNormMode.DEGREES), + "gripper": Motor(6, "sts3215", MotorNormMode.RANGE_0_100), + }, + calibration=self.calibration, + ) + + self.cameras = make_cameras_from_configs(config.cameras) + + self.config = config + + # Initialize the kinematics module for the so100 robot + self.kinematics = RobotKinematics(robot_type="so_new_calibration") + + # Store the bounds for end-effector position + self.end_effector_bounds = self.config.end_effector_bounds + + self.current_ee_pos = None + self.current_joint_pos = None + + @property + def action_features(self) -> dict[str, Any]: + """ + Define action features for end-effector control. + Returns dictionary with dtype, shape, and names. + """ + return { + "dtype": "float32", + "shape": (4,), + "names": {"delta_x": 0, "delta_y": 1, "delta_z": 2, "gripper": 3}, + } + + def send_action(self, action: dict[str, Any]) -> dict[str, Any]: + """ + Transform action from end-effector space to joint space and send to motors. + + Args: + action: Dictionary with keys 'delta_x', 'delta_y', 'delta_z' for end-effector control + or a numpy array with [delta_x, delta_y, delta_z] + + Returns: + The joint-space action that was sent to the motors + """ + + if not self.is_connected: + raise DeviceNotConnectedError(f"{self} is not connected.") + + # Convert action to numpy array if not already + if isinstance(action, dict): + if all(k in action for k in ["delta_x", "delta_y", "delta_z"]): + delta_ee = np.array( + [ + action["delta_x"] * self.config.end_effector_step_sizes["x"], + action["delta_y"] * self.config.end_effector_step_sizes["y"], + action["delta_z"] * self.config.end_effector_step_sizes["z"], + ], + dtype=np.float32, + ) + if "gripper" not in action: + action["gripper"] = [1.0] + action = np.append(delta_ee, action["gripper"]) + else: + logger.warning( + f"Expected action keys 'delta_x', 'delta_y', 'delta_z', got {list(action.keys())}" + ) + action = np.zeros(4, dtype=np.float32) + + if self.current_joint_pos is None: + # Read current joint positions + current_joint_pos = self.bus.sync_read("Present_Position") + self.current_joint_pos = np.array([current_joint_pos[name] for name in self.bus.motors]) + + # Calculate current end-effector position using forward kinematics + if self.current_ee_pos is None: + self.current_ee_pos = self.kinematics.forward_kinematics(self.current_joint_pos, frame=EE_FRAME) + + # Set desired end-effector position by adding delta + desired_ee_pos = np.eye(4) + desired_ee_pos[:3, :3] = self.current_ee_pos[:3, :3] # Keep orientation + + # Add delta to position and clip to bounds + desired_ee_pos[:3, 3] = self.current_ee_pos[:3, 3] + action[:3] + if self.end_effector_bounds is not None: + desired_ee_pos[:3, 3] = np.clip( + desired_ee_pos[:3, 3], + self.end_effector_bounds["min"], + self.end_effector_bounds["max"], + ) + + # Compute inverse kinematics to get joint positions + target_joint_values_in_degrees = self.kinematics.ik( + self.current_joint_pos, desired_ee_pos, position_only=True, frame=EE_FRAME + ) + + 
target_joint_values_in_degrees = np.clip(target_joint_values_in_degrees, -180.0, 180.0) + # Create joint space action dictionary + joint_action = { + f"{key}.pos": target_joint_values_in_degrees[i] for i, key in enumerate(self.bus.motors.keys()) + } + + # Handle gripper separately if included in action + # Gripper delta action is in the range 0 - 2, + # We need to shift the action to the range -1, 1 so that we can expand it to -Max_gripper_pos, Max_gripper_pos + joint_action["gripper.pos"] = np.clip( + self.current_joint_pos[-1] + (action[-1] - 1) * self.config.max_gripper_pos, + 5, + self.config.max_gripper_pos, + ) + + self.current_ee_pos = desired_ee_pos.copy() + self.current_joint_pos = target_joint_values_in_degrees.copy() + self.current_joint_pos[-1] = joint_action["gripper.pos"] + + # Send joint space action to parent class + return super().send_action(joint_action) + + def get_observation(self) -> dict[str, Any]: + if not self.is_connected: + raise DeviceNotConnectedError(f"{self} is not connected.") + + # Read arm position + start = time.perf_counter() + obs_dict = self.bus.sync_read("Present_Position") + obs_dict = {f"{motor}.pos": val for motor, val in obs_dict.items()} + dt_ms = (time.perf_counter() - start) * 1e3 + logger.debug(f"{self} read state: {dt_ms:.1f}ms") + + # Capture images from cameras + for cam_key, cam in self.cameras.items(): + start = time.perf_counter() + obs_dict[cam_key] = cam.async_read() + dt_ms = (time.perf_counter() - start) * 1e3 + logger.debug(f"{self} read {cam_key}: {dt_ms:.1f}ms") + + return obs_dict + + def reset(self): + self.current_ee_pos = None + self.current_joint_pos = None diff --git a/lerobot/common/robots/utils.py b/lerobot/common/robots/utils.py index d100c8366c..ccc1c58e86 100644 --- a/lerobot/common/robots/utils.py +++ b/lerobot/common/robots/utils.py @@ -29,6 +29,10 @@ def make_robot_from_config(config: RobotConfig) -> Robot: from .so100_follower import SO100Follower return SO100Follower(config) + elif config.type == "so100_follower_end_effector": + from .so100_follower import SO100FollowerEndEffector + + return SO100FollowerEndEffector(config) elif config.type == "so101_follower": from .so101_follower import SO101Follower diff --git a/lerobot/common/teleoperators/gamepad/__init__.py b/lerobot/common/teleoperators/gamepad/__init__.py new file mode 100644 index 0000000000..6f9f7fbd91 --- /dev/null +++ b/lerobot/common/teleoperators/gamepad/__init__.py @@ -0,0 +1,18 @@ +# !/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
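For illustration, the end-effector action space added above can be exercised directly; a minimal sketch, assuming the usual robot connect/disconnect API and a hypothetical serial port:

from lerobot.common.robots.so100_follower import (
    SO100FollowerEndEffector,
    SO100FollowerEndEffectorConfig,
)

config = SO100FollowerEndEffectorConfig(port="/dev/ttyACM0")  # hypothetical port
robot = SO100FollowerEndEffector(config)
robot.connect()

# Each delta is scaled by end_effector_step_sizes inside send_action; the gripper value
# follows the 0 = close, 1 = stay, 2 = open convention used by the teleoperators.
robot.send_action({"delta_x": 1.0, "delta_y": 0.0, "delta_z": 0.0, "gripper": 1.0})

robot.disconnect()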
+ +from .configuration_gamepad import GamepadTeleopConfig +from .teleop_gamepad import GamepadTeleop diff --git a/lerobot/common/teleoperators/gamepad/configuration_gamepad.py b/lerobot/common/teleoperators/gamepad/configuration_gamepad.py new file mode 100644 index 0000000000..b3a565c072 --- /dev/null +++ b/lerobot/common/teleoperators/gamepad/configuration_gamepad.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass + +from ..config import TeleoperatorConfig + + +@TeleoperatorConfig.register_subclass("gamepad") +@dataclass +class GamepadTeleopConfig(TeleoperatorConfig): + use_gripper: bool = True diff --git a/lerobot/common/teleoperators/gamepad/gamepad_utils.py b/lerobot/common/teleoperators/gamepad/gamepad_utils.py new file mode 100644 index 0000000000..21a293c771 --- /dev/null +++ b/lerobot/common/teleoperators/gamepad/gamepad_utils.py @@ -0,0 +1,480 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging + + +class InputController: + """Base class for input controllers that generate motion deltas.""" + + def __init__(self, x_step_size=1.0, y_step_size=1.0, z_step_size=1.0): + """ + Initialize the controller. 
+ + Args: + x_step_size: Base movement step size in meters + y_step_size: Base movement step size in meters + z_step_size: Base movement step size in meters + """ + self.x_step_size = x_step_size + self.y_step_size = y_step_size + self.z_step_size = z_step_size + self.running = True + self.episode_end_status = None # None, "success", or "failure" + self.intervention_flag = False + self.open_gripper_command = False + self.close_gripper_command = False + + def start(self): + """Start the controller and initialize resources.""" + pass + + def stop(self): + """Stop the controller and release resources.""" + pass + + def get_deltas(self): + """Get the current movement deltas (dx, dy, dz) in meters.""" + return 0.0, 0.0, 0.0 + + def should_quit(self): + """Return True if the user has requested to quit.""" + return not self.running + + def update(self): + """Update controller state - call this once per frame.""" + pass + + def __enter__(self): + """Support for use in 'with' statements.""" + self.start() + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + """Ensure resources are released when exiting 'with' block.""" + self.stop() + + def get_episode_end_status(self): + """ + Get the current episode end status. + + Returns: + None if episode should continue, "success" or "failure" otherwise + """ + status = self.episode_end_status + self.episode_end_status = None # Reset after reading + return status + + def should_intervene(self): + """Return True if intervention flag was set.""" + return self.intervention_flag + + def gripper_command(self): + """Return the current gripper command.""" + if self.open_gripper_command == self.close_gripper_command: + return "stay" + elif self.open_gripper_command: + return "open" + elif self.close_gripper_command: + return "close" + + +class KeyboardController(InputController): + """Generate motion deltas from keyboard input.""" + + def __init__(self, x_step_size=1.0, y_step_size=1.0, z_step_size=1.0): + super().__init__(x_step_size, y_step_size, z_step_size) + self.key_states = { + "forward_x": False, + "backward_x": False, + "forward_y": False, + "backward_y": False, + "forward_z": False, + "backward_z": False, + "quit": False, + "success": False, + "failure": False, + } + self.listener = None + + def start(self): + """Start the keyboard listener.""" + from pynput import keyboard + + def on_press(key): + try: + if key == keyboard.Key.up: + self.key_states["forward_x"] = True + elif key == keyboard.Key.down: + self.key_states["backward_x"] = True + elif key == keyboard.Key.left: + self.key_states["forward_y"] = True + elif key == keyboard.Key.right: + self.key_states["backward_y"] = True + elif key == keyboard.Key.shift: + self.key_states["backward_z"] = True + elif key == keyboard.Key.shift_r: + self.key_states["forward_z"] = True + elif key == keyboard.Key.esc: + self.key_states["quit"] = True + self.running = False + return False + elif key == keyboard.Key.enter: + self.key_states["success"] = True + self.episode_end_status = "success" + elif key == keyboard.Key.backspace: + self.key_states["failure"] = True + self.episode_end_status = "failure" + except AttributeError: + pass + + def on_release(key): + try: + if key == keyboard.Key.up: + self.key_states["forward_x"] = False + elif key == keyboard.Key.down: + self.key_states["backward_x"] = False + elif key == keyboard.Key.left: + self.key_states["forward_y"] = False + elif key == keyboard.Key.right: + self.key_states["backward_y"] = False + elif key == keyboard.Key.shift: + 
self.key_states["backward_z"] = False + elif key == keyboard.Key.shift_r: + self.key_states["forward_z"] = False + elif key == keyboard.Key.enter: + self.key_states["success"] = False + elif key == keyboard.Key.backspace: + self.key_states["failure"] = False + except AttributeError: + pass + + self.listener = keyboard.Listener(on_press=on_press, on_release=on_release) + self.listener.start() + + print("Keyboard controls:") + print(" Arrow keys: Move in X-Y plane") + print(" Shift and Shift_R: Move in Z axis") + print(" Enter: End episode with SUCCESS") + print(" Backspace: End episode with FAILURE") + print(" ESC: Exit") + + def stop(self): + """Stop the keyboard listener.""" + if self.listener and self.listener.is_alive(): + self.listener.stop() + + def get_deltas(self): + """Get the current movement deltas from keyboard state.""" + delta_x = delta_y = delta_z = 0.0 + + if self.key_states["forward_x"]: + delta_x += self.x_step_size + if self.key_states["backward_x"]: + delta_x -= self.x_step_size + if self.key_states["forward_y"]: + delta_y += self.y_step_size + if self.key_states["backward_y"]: + delta_y -= self.y_step_size + if self.key_states["forward_z"]: + delta_z += self.z_step_size + if self.key_states["backward_z"]: + delta_z -= self.z_step_size + + return delta_x, delta_y, delta_z + + def should_quit(self): + """Return True if ESC was pressed.""" + return self.key_states["quit"] + + def should_save(self): + """Return True if Enter was pressed (save episode).""" + return self.key_states["success"] or self.key_states["failure"] + + +class GamepadController(InputController): + """Generate motion deltas from gamepad input.""" + + def __init__(self, x_step_size=1.0, y_step_size=1.0, z_step_size=1.0, deadzone=0.1): + super().__init__(x_step_size, y_step_size, z_step_size) + self.deadzone = deadzone + self.joystick = None + self.intervention_flag = False + + def start(self): + """Initialize pygame and the gamepad.""" + import pygame + + pygame.init() + pygame.joystick.init() + + if pygame.joystick.get_count() == 0: + logging.error("No gamepad detected. 
Please connect a gamepad and try again.") + self.running = False + return + + self.joystick = pygame.joystick.Joystick(0) + self.joystick.init() + logging.info(f"Initialized gamepad: {self.joystick.get_name()}") + + print("Gamepad controls:") + print(" Left analog stick: Move in X-Y plane") + print(" Right analog stick (vertical): Move in Z axis") + print(" B/Circle button: Exit") + print(" Y/Triangle button: End episode with SUCCESS") + print(" A/Cross button: End episode with FAILURE") + print(" X/Square button: Rerecord episode") + + def stop(self): + """Clean up pygame resources.""" + import pygame + + if pygame.joystick.get_init(): + if self.joystick: + self.joystick.quit() + pygame.joystick.quit() + pygame.quit() + + def update(self): + """Process pygame events to get fresh gamepad readings.""" + import pygame + + for event in pygame.event.get(): + if event.type == pygame.JOYBUTTONDOWN: + if event.button == 3: + self.episode_end_status = "success" + # A button (1) for failure + elif event.button == 1: + self.episode_end_status = "failure" + # X button (0) for rerecord + elif event.button == 0: + self.episode_end_status = "rerecord_episode" + + # RB button (6) for closing gripper + elif event.button == 6: + self.close_gripper_command = True + + # LT button (7) for opening gripper + elif event.button == 7: + self.open_gripper_command = True + + # Reset episode status on button release + elif event.type == pygame.JOYBUTTONUP: + if event.button in [0, 2, 3]: + self.episode_end_status = None + + elif event.button == 6: + self.close_gripper_command = False + + elif event.button == 7: + self.open_gripper_command = False + + # Check for RB button (typically button 5) for intervention flag + if self.joystick.get_button(5): + self.intervention_flag = True + else: + self.intervention_flag = False + + def get_deltas(self): + """Get the current movement deltas from gamepad state.""" + import pygame + + try: + # Read joystick axes + # Left stick X and Y (typically axes 0 and 1) + x_input = self.joystick.get_axis(0) # Left/Right + y_input = self.joystick.get_axis(1) # Up/Down (often inverted) + + # Right stick Y (typically axis 3 or 4) + z_input = self.joystick.get_axis(3) # Up/Down for Z + + # Apply deadzone to avoid drift + x_input = 0 if abs(x_input) < self.deadzone else x_input + y_input = 0 if abs(y_input) < self.deadzone else y_input + z_input = 0 if abs(z_input) < self.deadzone else z_input + + # Calculate deltas (note: may need to invert axes depending on controller) + delta_x = -y_input * self.y_step_size # Forward/backward + delta_y = -x_input * self.x_step_size # Left/right + delta_z = -z_input * self.z_step_size # Up/down + + return delta_x, delta_y, delta_z + + except pygame.error: + logging.error("Error reading gamepad. Is it still connected?") + return 0.0, 0.0, 0.0 + + +class GamepadControllerHID(InputController): + """Generate motion deltas from gamepad input using HIDAPI.""" + + def __init__( + self, + x_step_size=1.0, + y_step_size=1.0, + z_step_size=1.0, + deadzone=0.1, + ): + """ + Initialize the HID gamepad controller. 
+ + Args: + x_step_size: Base movement step size in meters + y_step_size: Base movement step size in meters + z_step_size: Base movement step size in meters + deadzone: Joystick deadzone to prevent drift + """ + super().__init__(x_step_size, y_step_size, z_step_size) + self.deadzone = deadzone + self.device = None + self.device_info = None + + # Movement values (normalized from -1.0 to 1.0) + self.left_x = 0.0 + self.left_y = 0.0 + self.right_x = 0.0 + self.right_y = 0.0 + + # Button states + self.buttons = {} + self.quit_requested = False + self.save_requested = False + + def find_device(self): + """Look for a supported gamepad by matching its HID product string.""" + import hid + + devices = hid.enumerate() + for device in devices: + device_name = device["product_string"] + if any(controller in device_name for controller in ["Logitech", "Xbox", "PS4", "PS5"]): + return device + + logging.error( + "No gamepad found, check the connection and the product string in HID to add your gamepad" + ) + return None + + def start(self): + """Connect to the gamepad using HIDAPI.""" + import hid + + self.device_info = self.find_device() + if not self.device_info: + self.running = False + return + + try: + logging.info(f"Connecting to gamepad at path: {self.device_info['path']}") + self.device = hid.device() + self.device.open_path(self.device_info["path"]) + self.device.set_nonblocking(1) + + manufacturer = self.device.get_manufacturer_string() + product = self.device.get_product_string() + logging.info(f"Connected to {manufacturer} {product}") + + logging.info("Gamepad controls (HID mode):") + logging.info(" Left analog stick: Move in X-Y plane") + logging.info(" Right analog stick: Move in Z axis (vertical)") + logging.info(" Button 1/B/Circle: Exit") + logging.info(" Button 2/A/Cross: End episode with SUCCESS") + logging.info(" Button 3/X/Square: End episode with FAILURE") + + except OSError as e: + logging.error(f"Error opening gamepad: {e}") + logging.error("You might need to run this with sudo/admin privileges on some systems") + self.running = False + + def stop(self): + """Close the HID device connection.""" + if self.device: + self.device.close() + self.device = None + + def update(self): + """ + Read and process the latest gamepad data.
+ Due to an issue with the HIDAPI, we need to read the read the device several times in order to get a stable reading + """ + for _ in range(10): + self._update() + + def _update(self): + """Read and process the latest gamepad data.""" + if not self.device or not self.running: + return + + try: + # Read data from the gamepad + data = self.device.read(64) + # Interpret gamepad data - this will vary by controller model + # These offsets are for the Logitech RumblePad 2 + if data and len(data) >= 8: + # Normalize joystick values from 0-255 to -1.0-1.0 + self.left_x = (data[1] - 128) / 128.0 + self.left_y = (data[2] - 128) / 128.0 + self.right_x = (data[3] - 128) / 128.0 + self.right_y = (data[4] - 128) / 128.0 + + # Apply deadzone + self.left_x = 0 if abs(self.left_x) < self.deadzone else self.left_x + self.left_y = 0 if abs(self.left_y) < self.deadzone else self.left_y + self.right_x = 0 if abs(self.right_x) < self.deadzone else self.right_x + self.right_y = 0 if abs(self.right_y) < self.deadzone else self.right_y + + # Parse button states (byte 5 in the Logitech RumblePad 2) + buttons = data[5] + + # Check if RB is pressed then the intervention flag should be set + self.intervention_flag = data[6] in [2, 6, 10, 14] + + # Check if RT is pressed + self.open_gripper_command = data[6] in [8, 10, 12] + + # Check if LT is pressed + self.close_gripper_command = data[6] in [4, 6, 12] + + # Check if Y/Triangle button (bit 7) is pressed for saving + # Check if X/Square button (bit 5) is pressed for failure + # Check if A/Cross button (bit 4) is pressed for rerecording + if buttons & 1 << 7: + self.episode_end_status = "success" + elif buttons & 1 << 5: + self.episode_end_status = "failure" + elif buttons & 1 << 4: + self.episode_end_status = "rerecord_episode" + else: + self.episode_end_status = None + + except OSError as e: + logging.error(f"Error reading from gamepad: {e}") + + def get_deltas(self): + """Get the current movement deltas from gamepad state.""" + # Calculate deltas - invert as needed based on controller orientation + delta_x = -self.left_y * self.x_step_size # Forward/backward + delta_y = -self.left_x * self.y_step_size # Left/right + delta_z = -self.right_y * self.z_step_size # Up/down + + return delta_x, delta_y, delta_z + + def should_quit(self): + """Return True if quit button was pressed.""" + return self.quit_requested + + def should_save(self): + """Return True if save button was pressed.""" + return self.save_requested diff --git a/lerobot/common/teleoperators/gamepad/teleop_gamepad.py b/lerobot/common/teleoperators/gamepad/teleop_gamepad.py new file mode 100644 index 0000000000..98a0647e21 --- /dev/null +++ b/lerobot/common/teleoperators/gamepad/teleop_gamepad.py @@ -0,0 +1,138 @@ +# !/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
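For context, a sketch of how the gamepad teleoperator defined in this file pairs with the end-effector robot added earlier (construction details simplified; the port is hypothetical):

from lerobot.common.robots.so100_follower import SO100FollowerEndEffector, SO100FollowerEndEffectorConfig
from lerobot.common.teleoperators.gamepad import GamepadTeleop, GamepadTeleopConfig

robot = SO100FollowerEndEffector(SO100FollowerEndEffectorConfig(port="/dev/ttyACM0"))
teleop = GamepadTeleop(GamepadTeleopConfig(use_gripper=True))
robot.connect()
teleop.connect()

while True:
    # get_action() returns {"delta_x", "delta_y", "delta_z", "gripper"}, which
    # SO100FollowerEndEffector.send_action turns into joint targets via inverse kinematics.
    robot.send_action(teleop.get_action())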
+ +import sys +from enum import IntEnum +from typing import Any + +import numpy as np + +from ..teleoperator import Teleoperator +from .configuration_gamepad import GamepadTeleopConfig + + +class GripperAction(IntEnum): + CLOSE = 0 + STAY = 1 + OPEN = 2 + + +gripper_action_map = { + "close": GripperAction.CLOSE.value, + "open": GripperAction.OPEN.value, + "stay": GripperAction.STAY.value, +} + + +class GamepadTeleop(Teleoperator): + """ + Teleop class to use gamepad inputs for control. + """ + + config_class = GamepadTeleopConfig + name = "gamepad" + + def __init__(self, config: GamepadTeleopConfig): + super().__init__(config) + self.config = config + self.robot_type = config.type + + self.gamepad = None + + @property + def action_features(self) -> dict: + if self.config.use_gripper: + return { + "dtype": "float32", + "shape": (4,), + "names": {"delta_x": 0, "delta_y": 1, "delta_z": 2, "gripper": 3}, + } + else: + return { + "dtype": "float32", + "shape": (3,), + "names": {"delta_x": 0, "delta_y": 1, "delta_z": 2}, + } + + @property + def feedback_features(self) -> dict: + return {} + + def connect(self) -> None: + # use HidApi for macos + if sys.platform == "darwin": + # NOTE: On macOS, pygame doesn’t reliably detect input from some controllers so we fall back to hidapi + from .gamepad_utils import GamepadControllerHID as Gamepad + else: + from .gamepad_utils import GamepadController as Gamepad + + self.gamepad = Gamepad() + self.gamepad.start() + + def get_action(self) -> dict[str, Any]: + # Update the controller to get fresh inputs + self.gamepad.update() + + # Get movement deltas from the controller + delta_x, delta_y, delta_z = self.gamepad.get_deltas() + + # Create action from gamepad input + gamepad_action = np.array([delta_x, delta_y, delta_z], dtype=np.float32) + + action_dict = { + "delta_x": gamepad_action[0], + "delta_y": gamepad_action[1], + "delta_z": gamepad_action[2], + } + + # Default gripper action is to stay + gripper_action = GripperAction.STAY.value + if self.config.use_gripper: + gripper_command = self.gamepad.gripper_command() + gripper_action = gripper_action_map[gripper_command] + action_dict["gripper"] = gripper_action + + return action_dict + + def disconnect(self) -> None: + """Disconnect from the gamepad.""" + if self.gamepad is not None: + self.gamepad.stop() + self.gamepad = None + + def is_connected(self) -> bool: + """Check if gamepad is connected.""" + return self.gamepad is not None + + def calibrate(self) -> None: + """Calibrate the gamepad.""" + # No calibration needed for gamepad + pass + + def is_calibrated(self) -> bool: + """Check if gamepad is calibrated.""" + # Gamepad doesn't require calibration + return True + + def configure(self) -> None: + """Configure the gamepad.""" + # No additional configuration needed + pass + + def send_feedback(self, feedback: dict) -> None: + """Send feedback to the gamepad.""" + # Gamepad doesn't support feedback + pass diff --git a/lerobot/common/teleoperators/so101_leader/config_so101_leader.py b/lerobot/common/teleoperators/so101_leader/config_so101_leader.py index 5f2e110da1..8d91c32dfe 100644 --- a/lerobot/common/teleoperators/so101_leader/config_so101_leader.py +++ b/lerobot/common/teleoperators/so101_leader/config_so101_leader.py @@ -24,3 +24,5 @@ class SO101LeaderConfig(TeleoperatorConfig): # Port to connect to the arm port: str + + use_degrees: bool = False diff --git a/lerobot/common/teleoperators/so101_leader/so101_leader.py b/lerobot/common/teleoperators/so101_leader/so101_leader.py index 
34ad31dafe..d324e2a888 100644 --- a/lerobot/common/teleoperators/so101_leader/so101_leader.py +++ b/lerobot/common/teleoperators/so101_leader/so101_leader.py @@ -41,14 +41,15 @@ class SO101Leader(Teleoperator): def __init__(self, config: SO101LeaderConfig): super().__init__(config) self.config = config + norm_mode_body = MotorNormMode.DEGREES if config.use_degrees else MotorNormMode.RANGE_M100_100 self.bus = FeetechMotorsBus( port=self.config.port, motors={ - "shoulder_pan": Motor(1, "sts3215", MotorNormMode.RANGE_M100_100), - "shoulder_lift": Motor(2, "sts3215", MotorNormMode.RANGE_M100_100), - "elbow_flex": Motor(3, "sts3215", MotorNormMode.RANGE_M100_100), - "wrist_flex": Motor(4, "sts3215", MotorNormMode.RANGE_M100_100), - "wrist_roll": Motor(5, "sts3215", MotorNormMode.RANGE_M100_100), + "shoulder_pan": Motor(1, "sts3215", norm_mode_body), + "shoulder_lift": Motor(2, "sts3215", norm_mode_body), + "elbow_flex": Motor(3, "sts3215", norm_mode_body), + "wrist_flex": Motor(4, "sts3215", norm_mode_body), + "wrist_roll": Motor(5, "sts3215", norm_mode_body), "gripper": Motor(6, "sts3215", MotorNormMode.RANGE_0_100), }, calibration=self.calibration, diff --git a/lerobot/common/teleoperators/utils.py b/lerobot/common/teleoperators/utils.py index 4942084ac7..d7b7bcf0e6 100644 --- a/lerobot/common/teleoperators/utils.py +++ b/lerobot/common/teleoperators/utils.py @@ -45,5 +45,9 @@ def make_teleoperator_from_config(config: TeleoperatorConfig) -> Teleoperator: from tests.mocks.mock_teleop import MockTeleop return MockTeleop(config) + elif config.type == "gamepad": + from .gamepad.teleop_gamepad import GamepadTeleop + + return GamepadTeleop(config) else: raise ValueError(config.type) diff --git a/lerobot/common/transport/services.proto b/lerobot/common/transport/services.proto new file mode 100644 index 0000000000..29d00005a6 --- /dev/null +++ b/lerobot/common/transport/services.proto @@ -0,0 +1,59 @@ +// Copyright 2024 The HuggingFace Inc. team. +// All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at + +// http://www.apache.org/licenses/LICENSE-2.0 + +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// To generate a classes for transport part (services_pb2.py and services_pb2_grpc.py) use the following command: +// +// python -m grpc_tools.protoc -I . --python_out=. --grpc_python_out=. lerobot/common/transport/services.proto +// +// The command should be launched from the root of the project. + +syntax = "proto3"; + +package transport; + +// LearnerService: the Actor calls this to push transitions. +// The Learner implements this service. 
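+// Large payloads are split into chunks tagged with a TransferState; see lerobot/common/transport/utils.py for the send/receive helpers.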
+service LearnerService { + // Actor -> Learner to store transitions + rpc StreamParameters(Empty) returns (stream Parameters); + rpc SendTransitions(stream Transition) returns (Empty); + rpc SendInteractions(stream InteractionMessage) returns (Empty); + rpc Ready(Empty) returns (Empty); +} + +enum TransferState { + TRANSFER_UNKNOWN = 0; + TRANSFER_BEGIN = 1; + TRANSFER_MIDDLE = 2; + TRANSFER_END = 3; +} + +// Messages +message Transition { + TransferState transfer_state = 1; + bytes data = 2; +} + +message Parameters { + TransferState transfer_state = 1; + bytes data = 2; +} + +message InteractionMessage { + TransferState transfer_state = 1; + bytes data = 2; +} + +message Empty {} diff --git a/lerobot/common/transport/services_pb2.py b/lerobot/common/transport/services_pb2.py new file mode 100644 index 0000000000..727beb60de --- /dev/null +++ b/lerobot/common/transport/services_pb2.py @@ -0,0 +1,45 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# NO CHECKED-IN PROTOBUF GENCODE +# source: lerobot/common/transport/services.proto +# Protobuf Python Version: 5.29.0 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import runtime_version as _runtime_version +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +_runtime_version.ValidateProtobufRuntimeVersion( + _runtime_version.Domain.PUBLIC, + 5, + 29, + 0, + '', + 'lerobot/common/transport/services.proto' +) +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\'lerobot/common/transport/services.proto\x12\ttransport\"L\n\nTransition\x12\x30\n\x0etransfer_state\x18\x01 \x01(\x0e\x32\x18.transport.TransferState\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"L\n\nParameters\x12\x30\n\x0etransfer_state\x18\x01 \x01(\x0e\x32\x18.transport.TransferState\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"T\n\x12InteractionMessage\x12\x30\n\x0etransfer_state\x18\x01 \x01(\x0e\x32\x18.transport.TransferState\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"\x07\n\x05\x45mpty*`\n\rTransferState\x12\x14\n\x10TRANSFER_UNKNOWN\x10\x00\x12\x12\n\x0eTRANSFER_BEGIN\x10\x01\x12\x13\n\x0fTRANSFER_MIDDLE\x10\x02\x12\x10\n\x0cTRANSFER_END\x10\x03\x32\x81\x02\n\x0eLearnerService\x12=\n\x10StreamParameters\x12\x10.transport.Empty\x1a\x15.transport.Parameters0\x01\x12<\n\x0fSendTransitions\x12\x15.transport.Transition\x1a\x10.transport.Empty(\x01\x12\x45\n\x10SendInteractions\x12\x1d.transport.InteractionMessage\x1a\x10.transport.Empty(\x01\x12+\n\x05Ready\x12\x10.transport.Empty\x1a\x10.transport.Emptyb\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'lerobot.common.transport.services_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + DESCRIPTOR._loaded_options = None + _globals['_TRANSFERSTATE']._serialized_start=305 + _globals['_TRANSFERSTATE']._serialized_end=401 + _globals['_TRANSITION']._serialized_start=54 + _globals['_TRANSITION']._serialized_end=130 + _globals['_PARAMETERS']._serialized_start=132 + _globals['_PARAMETERS']._serialized_end=208 + _globals['_INTERACTIONMESSAGE']._serialized_start=210 + _globals['_INTERACTIONMESSAGE']._serialized_end=294 + _globals['_EMPTY']._serialized_start=296 + _globals['_EMPTY']._serialized_end=303 + 
_globals['_LEARNERSERVICE']._serialized_start=404 + _globals['_LEARNERSERVICE']._serialized_end=661 +# @@protoc_insertion_point(module_scope) diff --git a/lerobot/common/transport/services_pb2_grpc.py b/lerobot/common/transport/services_pb2_grpc.py new file mode 100644 index 0000000000..5a7a924fd2 --- /dev/null +++ b/lerobot/common/transport/services_pb2_grpc.py @@ -0,0 +1,233 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc +import warnings + +from lerobot.common.transport import services_pb2 as lerobot_dot_common_dot_transport_dot_services__pb2 + +GRPC_GENERATED_VERSION = '1.71.0' +GRPC_VERSION = grpc.__version__ +_version_not_supported = False + +try: + from grpc._utilities import first_version_is_lower + _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION) +except ImportError: + _version_not_supported = True + +if _version_not_supported: + raise RuntimeError( + f'The grpc package installed is at version {GRPC_VERSION},' + + f' but the generated code in lerobot/common/transport/services_pb2_grpc.py depends on' + + f' grpcio>={GRPC_GENERATED_VERSION}.' + + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}' + + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.' + ) + + +class LearnerServiceStub: + """LearnerService: the Actor calls this to push transitions. + The Learner implements this service. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. + """ + self.StreamParameters = channel.unary_stream( + '/transport.LearnerService/StreamParameters', + request_serializer=lerobot_dot_common_dot_transport_dot_services__pb2.Empty.SerializeToString, + response_deserializer=lerobot_dot_common_dot_transport_dot_services__pb2.Parameters.FromString, + _registered_method=True) + self.SendTransitions = channel.stream_unary( + '/transport.LearnerService/SendTransitions', + request_serializer=lerobot_dot_common_dot_transport_dot_services__pb2.Transition.SerializeToString, + response_deserializer=lerobot_dot_common_dot_transport_dot_services__pb2.Empty.FromString, + _registered_method=True) + self.SendInteractions = channel.stream_unary( + '/transport.LearnerService/SendInteractions', + request_serializer=lerobot_dot_common_dot_transport_dot_services__pb2.InteractionMessage.SerializeToString, + response_deserializer=lerobot_dot_common_dot_transport_dot_services__pb2.Empty.FromString, + _registered_method=True) + self.Ready = channel.unary_unary( + '/transport.LearnerService/Ready', + request_serializer=lerobot_dot_common_dot_transport_dot_services__pb2.Empty.SerializeToString, + response_deserializer=lerobot_dot_common_dot_transport_dot_services__pb2.Empty.FromString, + _registered_method=True) + + +class LearnerServiceServicer: + """LearnerService: the Actor calls this to push transitions. + The Learner implements this service. 
+ """ + + def StreamParameters(self, request, context): + """Actor -> Learner to store transitions + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def SendTransitions(self, request_iterator, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def SendInteractions(self, request_iterator, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def Ready(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_LearnerServiceServicer_to_server(servicer, server): + rpc_method_handlers = { + 'StreamParameters': grpc.unary_stream_rpc_method_handler( + servicer.StreamParameters, + request_deserializer=lerobot_dot_common_dot_transport_dot_services__pb2.Empty.FromString, + response_serializer=lerobot_dot_common_dot_transport_dot_services__pb2.Parameters.SerializeToString, + ), + 'SendTransitions': grpc.stream_unary_rpc_method_handler( + servicer.SendTransitions, + request_deserializer=lerobot_dot_common_dot_transport_dot_services__pb2.Transition.FromString, + response_serializer=lerobot_dot_common_dot_transport_dot_services__pb2.Empty.SerializeToString, + ), + 'SendInteractions': grpc.stream_unary_rpc_method_handler( + servicer.SendInteractions, + request_deserializer=lerobot_dot_common_dot_transport_dot_services__pb2.InteractionMessage.FromString, + response_serializer=lerobot_dot_common_dot_transport_dot_services__pb2.Empty.SerializeToString, + ), + 'Ready': grpc.unary_unary_rpc_method_handler( + servicer.Ready, + request_deserializer=lerobot_dot_common_dot_transport_dot_services__pb2.Empty.FromString, + response_serializer=lerobot_dot_common_dot_transport_dot_services__pb2.Empty.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'transport.LearnerService', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + server.add_registered_method_handlers('transport.LearnerService', rpc_method_handlers) + + + # This class is part of an EXPERIMENTAL API. +class LearnerService: + """LearnerService: the Actor calls this to push transitions. + The Learner implements this service. 
+ """ + + @staticmethod + def StreamParameters(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_stream( + request, + target, + '/transport.LearnerService/StreamParameters', + lerobot_dot_common_dot_transport_dot_services__pb2.Empty.SerializeToString, + lerobot_dot_common_dot_transport_dot_services__pb2.Parameters.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def SendTransitions(request_iterator, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.stream_unary( + request_iterator, + target, + '/transport.LearnerService/SendTransitions', + lerobot_dot_common_dot_transport_dot_services__pb2.Transition.SerializeToString, + lerobot_dot_common_dot_transport_dot_services__pb2.Empty.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def SendInteractions(request_iterator, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.stream_unary( + request_iterator, + target, + '/transport.LearnerService/SendInteractions', + lerobot_dot_common_dot_transport_dot_services__pb2.InteractionMessage.SerializeToString, + lerobot_dot_common_dot_transport_dot_services__pb2.Empty.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def Ready(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/transport.LearnerService/Ready', + lerobot_dot_common_dot_transport_dot_services__pb2.Empty.SerializeToString, + lerobot_dot_common_dot_transport_dot_services__pb2.Empty.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) diff --git a/lerobot/common/transport/utils.py b/lerobot/common/transport/utils.py new file mode 100644 index 0000000000..774721fc6d --- /dev/null +++ b/lerobot/common/transport/utils.py @@ -0,0 +1,141 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
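Taken together, the generated `LearnerServiceStub` (caller side), the `LearnerServiceServicer` base class (implementer side), and the chunked-transfer helpers defined in this new `utils.py` module (shown just below) form the actor/learner transport. The following is a minimal, hypothetical end-to-end sketch, not part of the patch; the address/port and the empty `transitions` list are illustrative assumptions.

```python
# Editorial sketch, not part of the patch: "127.0.0.1:50051" and the empty
# `transitions` list are illustrative placeholders.
from concurrent import futures

import grpc

from lerobot.common.transport import services_pb2, services_pb2_grpc
from lerobot.common.transport.utils import send_bytes_in_chunks, transitions_to_bytes

# Learner side: implement the servicer and register it on a gRPC server.
class MinimalLearner(services_pb2_grpc.LearnerServiceServicer):
    def Ready(self, request, context):
        return services_pb2.Empty()

    def SendTransitions(self, request_iterator, context):
        # In the real learner, receive_bytes_in_chunks reassembles the payload.
        for _chunk in request_iterator:
            pass
        return services_pb2.Empty()

server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
services_pb2_grpc.add_LearnerServiceServicer_to_server(MinimalLearner(), server)
server.add_insecure_port("127.0.0.1:50051")
server.start()

# Actor side: wait until the learner answers Ready, then stream transitions
# as chunked Transition messages.
channel = grpc.insecure_channel("127.0.0.1:50051")
stub = services_pb2_grpc.LearnerServiceStub(channel)
stub.Ready(services_pb2.Empty())

transitions = []  # in practice, a list of Transition dicts collected by the actor
stub.SendTransitions(
    send_bytes_in_chunks(transitions_to_bytes(transitions), services_pb2.Transition)
)
```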
+ +import io +import logging +import pickle # nosec B403: Safe usage for internal serialization only +from multiprocessing import Event, Queue +from typing import Any + +import torch + +from lerobot.common.transport import services_pb2 +from lerobot.common.utils.transition import Transition + +CHUNK_SIZE = 2 * 1024 * 1024 # 2 MB + + +def bytes_buffer_size(buffer: io.BytesIO) -> int: + buffer.seek(0, io.SEEK_END) + result = buffer.tell() + buffer.seek(0) + return result + + +def send_bytes_in_chunks(buffer: bytes, message_class: Any, log_prefix: str = "", silent: bool = True): + buffer = io.BytesIO(buffer) + size_in_bytes = bytes_buffer_size(buffer) + + sent_bytes = 0 + + logging_method = logging.info if not silent else logging.debug + + logging_method(f"{log_prefix} Buffer size {size_in_bytes / 1024 / 1024} MB with") + + while sent_bytes < size_in_bytes: + transfer_state = services_pb2.TransferState.TRANSFER_MIDDLE + + if sent_bytes + CHUNK_SIZE >= size_in_bytes: + transfer_state = services_pb2.TransferState.TRANSFER_END + elif sent_bytes == 0: + transfer_state = services_pb2.TransferState.TRANSFER_BEGIN + + size_to_read = min(CHUNK_SIZE, size_in_bytes - sent_bytes) + chunk = buffer.read(size_to_read) + + yield message_class(transfer_state=transfer_state, data=chunk) + sent_bytes += size_to_read + logging_method(f"{log_prefix} Sent {sent_bytes}/{size_in_bytes} bytes with state {transfer_state}") + + logging_method(f"{log_prefix} Published {sent_bytes / 1024 / 1024} MB") + + +def receive_bytes_in_chunks(iterator, queue: Queue, shutdown_event: Event, log_prefix: str = ""): # type: ignore + bytes_buffer = io.BytesIO() + step = 0 + + logging.info(f"{log_prefix} Starting receiver") + for item in iterator: + logging.debug(f"{log_prefix} Received item") + if shutdown_event.is_set(): + logging.info(f"{log_prefix} Shutting down receiver") + return + + if item.transfer_state == services_pb2.TransferState.TRANSFER_BEGIN: + bytes_buffer.seek(0) + bytes_buffer.truncate(0) + bytes_buffer.write(item.data) + logging.debug(f"{log_prefix} Received data at step 0") + step = 0 + elif item.transfer_state == services_pb2.TransferState.TRANSFER_MIDDLE: + bytes_buffer.write(item.data) + step += 1 + logging.debug(f"{log_prefix} Received data at step {step}") + elif item.transfer_state == services_pb2.TransferState.TRANSFER_END: + bytes_buffer.write(item.data) + logging.debug(f"{log_prefix} Received data at step end size {bytes_buffer_size(bytes_buffer)}") + + queue.put(bytes_buffer.getvalue()) + + bytes_buffer.seek(0) + bytes_buffer.truncate(0) + step = 0 + + logging.debug(f"{log_prefix} Queue updated") + else: + logging.warning(f"{log_prefix} Received unknown transfer state {item.transfer_state}") + raise ValueError(f"Received unknown transfer state {item.transfer_state}") + + +def state_to_bytes(state_dict: dict[str, torch.Tensor]) -> bytes: + """Convert model state dict to flat array for transmission""" + buffer = io.BytesIO() + + torch.save(state_dict, buffer) + + return buffer.getvalue() + + +def bytes_to_state_dict(buffer: bytes) -> dict[str, torch.Tensor]: + buffer = io.BytesIO(buffer) + buffer.seek(0) + return torch.load(buffer, weights_only=True) + + +def python_object_to_bytes(python_object: Any) -> bytes: + return pickle.dumps(python_object) + + +def bytes_to_python_object(buffer: bytes) -> Any: + buffer = io.BytesIO(buffer) + buffer.seek(0) + obj = pickle.load(buffer) # nosec B301: Safe usage of pickle.load + # Add validation checks here + return obj + + +def bytes_to_transitions(buffer: bytes) -> 
list[Transition]: + buffer = io.BytesIO(buffer) + buffer.seek(0) + transitions = torch.load(buffer, weights_only=True) + return transitions + + +def transitions_to_bytes(transitions: list[Transition]) -> bytes: + buffer = io.BytesIO() + torch.save(transitions, buffer) + return buffer.getvalue() diff --git a/lerobot/common/utils/buffer.py b/lerobot/common/utils/buffer.py new file mode 100644 index 0000000000..9ae231ad92 --- /dev/null +++ b/lerobot/common/utils/buffer.py @@ -0,0 +1,841 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import functools +from contextlib import suppress +from typing import Callable, Sequence, TypedDict + +import torch +import torch.nn.functional as F # noqa: N812 +from tqdm import tqdm + +from lerobot.common.datasets.lerobot_dataset import LeRobotDataset +from lerobot.common.utils.transition import Transition + + +class BatchTransition(TypedDict): + state: dict[str, torch.Tensor] + action: torch.Tensor + reward: torch.Tensor + next_state: dict[str, torch.Tensor] + done: torch.Tensor + truncated: torch.Tensor + complementary_info: dict[str, torch.Tensor | float | int] | None = None + + +def random_crop_vectorized(images: torch.Tensor, output_size: tuple) -> torch.Tensor: + """ + Perform a per-image random crop over a batch of images in a vectorized way. + (Same as shown previously.) + """ + B, C, H, W = images.shape # noqa: N806 + crop_h, crop_w = output_size + + if crop_h > H or crop_w > W: + raise ValueError( + f"Requested crop size ({crop_h}, {crop_w}) is bigger than the image size ({H}, {W})." + ) + + tops = torch.randint(0, H - crop_h + 1, (B,), device=images.device) + lefts = torch.randint(0, W - crop_w + 1, (B,), device=images.device) + + rows = torch.arange(crop_h, device=images.device).unsqueeze(0) + tops.unsqueeze(1) + cols = torch.arange(crop_w, device=images.device).unsqueeze(0) + lefts.unsqueeze(1) + + rows = rows.unsqueeze(2).expand(-1, -1, crop_w) # (B, crop_h, crop_w) + cols = cols.unsqueeze(1).expand(-1, crop_h, -1) # (B, crop_h, crop_w) + + images_hwcn = images.permute(0, 2, 3, 1) # (B, H, W, C) + + # Gather pixels + cropped_hwcn = images_hwcn[torch.arange(B, device=images.device).view(B, 1, 1), rows, cols, :] + # cropped_hwcn => (B, crop_h, crop_w, C) + + cropped = cropped_hwcn.permute(0, 3, 1, 2) # (B, C, crop_h, crop_w) + return cropped + + +def random_shift(images: torch.Tensor, pad: int = 4): + """Vectorized random shift, imgs: (B,C,H,W), pad: #pixels""" + _, _, h, w = images.shape + images = F.pad(input=images, pad=(pad, pad, pad, pad), mode="replicate") + return random_crop_vectorized(images=images, output_size=(h, w)) + + +class ReplayBuffer: + def __init__( + self, + capacity: int, + device: str = "cuda:0", + state_keys: Sequence[str] | None = None, + image_augmentation_function: Callable | None = None, + use_drq: bool = True, + storage_device: str = "cpu", + optimize_memory: bool = False, + ): + """ + Replay buffer for storing transitions. 
+ It will allocate tensors on the specified device, when the first transition is added. + NOTE: If you encounter memory issues, you can try to use the `optimize_memory` flag to save memory or + and use the `storage_device` flag to store the buffer on a different device. + Args: + capacity (int): Maximum number of transitions to store in the buffer. + device (str): The device where the tensors will be moved when sampling ("cuda:0" or "cpu"). + state_keys (List[str]): The list of keys that appear in `state` and `next_state`. + image_augmentation_function (Optional[Callable]): A function that takes a batch of images + and returns a batch of augmented images. If None, a default augmentation function is used. + use_drq (bool): Whether to use the default DRQ image augmentation style, when sampling in the buffer. + storage_device: The device (e.g. "cpu" or "cuda:0") where the data will be stored. + Using "cpu" can help save GPU memory. + optimize_memory (bool): If True, optimizes memory by not storing duplicate next_states when + they can be derived from states. This is useful for large datasets where next_state[i] = state[i+1]. + """ + if capacity <= 0: + raise ValueError("Capacity must be greater than 0.") + + self.capacity = capacity + self.device = device + self.storage_device = storage_device + self.position = 0 + self.size = 0 + self.initialized = False + self.optimize_memory = optimize_memory + + # Track episode boundaries for memory optimization + self.episode_ends = torch.zeros(capacity, dtype=torch.bool, device=storage_device) + + # If no state_keys provided, default to an empty list + self.state_keys = state_keys if state_keys is not None else [] + + self.image_augmentation_function = image_augmentation_function + + if image_augmentation_function is None: + base_function = functools.partial(random_shift, pad=4) + self.image_augmentation_function = torch.compile(base_function) + self.use_drq = use_drq + + def _initialize_storage( + self, + state: dict[str, torch.Tensor], + action: torch.Tensor, + complementary_info: dict[str, torch.Tensor] | None = None, + ): + """Initialize the storage tensors based on the first transition.""" + # Determine shapes from the first transition + state_shapes = {key: val.squeeze(0).shape for key, val in state.items()} + action_shape = action.squeeze(0).shape + + # Pre-allocate tensors for storage + self.states = { + key: torch.empty((self.capacity, *shape), device=self.storage_device) + for key, shape in state_shapes.items() + } + self.actions = torch.empty((self.capacity, *action_shape), device=self.storage_device) + self.rewards = torch.empty((self.capacity,), device=self.storage_device) + + if not self.optimize_memory: + # Standard approach: store states and next_states separately + self.next_states = { + key: torch.empty((self.capacity, *shape), device=self.storage_device) + for key, shape in state_shapes.items() + } + else: + # Memory-optimized approach: don't allocate next_states buffer + # Just create a reference to states for consistent API + self.next_states = self.states # Just a reference for API consistency + + self.dones = torch.empty((self.capacity,), dtype=torch.bool, device=self.storage_device) + self.truncateds = torch.empty((self.capacity,), dtype=torch.bool, device=self.storage_device) + + # Initialize storage for complementary_info + self.has_complementary_info = complementary_info is not None + self.complementary_info_keys = [] + self.complementary_info = {} + + if self.has_complementary_info: + self.complementary_info_keys = 
list(complementary_info.keys()) + # Pre-allocate tensors for each key in complementary_info + for key, value in complementary_info.items(): + if isinstance(value, torch.Tensor): + value_shape = value.squeeze(0).shape + self.complementary_info[key] = torch.empty( + (self.capacity, *value_shape), device=self.storage_device + ) + elif isinstance(value, (int, float)): + # Handle scalar values similar to reward + self.complementary_info[key] = torch.empty((self.capacity,), device=self.storage_device) + else: + raise ValueError(f"Unsupported type {type(value)} for complementary_info[{key}]") + + self.initialized = True + + def __len__(self): + return self.size + + def add( + self, + state: dict[str, torch.Tensor], + action: torch.Tensor, + reward: float, + next_state: dict[str, torch.Tensor], + done: bool, + truncated: bool, + complementary_info: dict[str, torch.Tensor] | None = None, + ): + """Saves a transition, ensuring tensors are stored on the designated storage device.""" + # Initialize storage if this is the first transition + if not self.initialized: + self._initialize_storage(state=state, action=action, complementary_info=complementary_info) + + # Store the transition in pre-allocated tensors + for key in self.states: + self.states[key][self.position].copy_(state[key].squeeze(dim=0)) + + if not self.optimize_memory: + # Only store next_states if not optimizing memory + self.next_states[key][self.position].copy_(next_state[key].squeeze(dim=0)) + + self.actions[self.position].copy_(action.squeeze(dim=0)) + self.rewards[self.position] = reward + self.dones[self.position] = done + self.truncateds[self.position] = truncated + + # Handle complementary_info if provided and storage is initialized + if complementary_info is not None and self.has_complementary_info: + # Store the complementary_info + for key in self.complementary_info_keys: + if key in complementary_info: + value = complementary_info[key] + if isinstance(value, torch.Tensor): + self.complementary_info[key][self.position].copy_(value.squeeze(dim=0)) + elif isinstance(value, (int, float)): + self.complementary_info[key][self.position] = value + + self.position = (self.position + 1) % self.capacity + self.size = min(self.size + 1, self.capacity) + + def sample(self, batch_size: int) -> BatchTransition: + """Sample a random batch of transitions and collate them into batched tensors.""" + if not self.initialized: + raise RuntimeError("Cannot sample from an empty buffer. 
Add transitions first.") + + batch_size = min(batch_size, self.size) + high = max(0, self.size - 1) if self.optimize_memory and self.size < self.capacity else self.size + + # Random indices for sampling - create on the same device as storage + idx = torch.randint(low=0, high=high, size=(batch_size,), device=self.storage_device) + + # Identify image keys that need augmentation + image_keys = [k for k in self.states if k.startswith("observation.image")] if self.use_drq else [] + + # Create batched state and next_state + batch_state = {} + batch_next_state = {} + + # First pass: load all state tensors to target device + for key in self.states: + batch_state[key] = self.states[key][idx].to(self.device) + + if not self.optimize_memory: + # Standard approach - load next_states directly + batch_next_state[key] = self.next_states[key][idx].to(self.device) + else: + # Memory-optimized approach - get next_state from the next index + next_idx = (idx + 1) % self.capacity + batch_next_state[key] = self.states[key][next_idx].to(self.device) + + # Apply image augmentation in a batched way if needed + if self.use_drq and image_keys: + # Concatenate all images from state and next_state + all_images = [] + for key in image_keys: + all_images.append(batch_state[key]) + all_images.append(batch_next_state[key]) + + # Optimization: Batch all images and apply augmentation once + all_images_tensor = torch.cat(all_images, dim=0) + augmented_images = self.image_augmentation_function(all_images_tensor) + + # Split the augmented images back to their sources + for i, key in enumerate(image_keys): + # Calculate offsets for the current image key: + # For each key, we have 2*batch_size images (batch_size for states, batch_size for next_states) + # States start at index i*2*batch_size and take up batch_size slots + batch_state[key] = augmented_images[i * 2 * batch_size : (i * 2 + 1) * batch_size] + # Next states start after the states at index (i*2+1)*batch_size and also take up batch_size slots + batch_next_state[key] = augmented_images[(i * 2 + 1) * batch_size : (i + 1) * 2 * batch_size] + + # Sample other tensors + batch_actions = self.actions[idx].to(self.device) + batch_rewards = self.rewards[idx].to(self.device) + batch_dones = self.dones[idx].to(self.device).float() + batch_truncateds = self.truncateds[idx].to(self.device).float() + + # Sample complementary_info if available + batch_complementary_info = None + if self.has_complementary_info: + batch_complementary_info = {} + for key in self.complementary_info_keys: + batch_complementary_info[key] = self.complementary_info[key][idx].to(self.device) + + return BatchTransition( + state=batch_state, + action=batch_actions, + reward=batch_rewards, + next_state=batch_next_state, + done=batch_dones, + truncated=batch_truncateds, + complementary_info=batch_complementary_info, + ) + + def get_iterator( + self, + batch_size: int, + async_prefetch: bool = True, + queue_size: int = 2, + ): + """ + Creates an infinite iterator that yields batches of transitions. + Will automatically restart when internal iterator is exhausted. 
+ + Args: + batch_size (int): Size of batches to sample + async_prefetch (bool): Whether to use asynchronous prefetching with threads (default: True) + queue_size (int): Number of batches to prefetch (default: 2) + + Yields: + BatchTransition: Batched transitions + """ + while True: # Create an infinite loop + if async_prefetch: + # Get the standard iterator + iterator = self._get_async_iterator(queue_size=queue_size, batch_size=batch_size) + else: + iterator = self._get_naive_iterator(batch_size=batch_size, queue_size=queue_size) + + # Yield all items from the iterator + with suppress(StopIteration): + yield from iterator + + def _get_async_iterator(self, batch_size: int, queue_size: int = 2): + """ + Create an iterator that continuously yields prefetched batches in a + background thread. The design is intentionally simple and avoids busy + waiting / complex state management. + + Args: + batch_size (int): Size of batches to sample. + queue_size (int): Maximum number of prefetched batches to keep in + memory. + + Yields: + BatchTransition: A batch sampled from the replay buffer. + """ + import queue + import threading + + data_queue: queue.Queue = queue.Queue(maxsize=queue_size) + shutdown_event = threading.Event() + + def producer() -> None: + """Continuously put sampled batches into the queue until shutdown.""" + while not shutdown_event.is_set(): + try: + batch = self.sample(batch_size) + # The timeout ensures the thread unblocks if the queue is full + # and the shutdown event gets set meanwhile. + data_queue.put(batch, block=True, timeout=0.5) + except queue.Full: + # Queue is full – loop again (will re-check shutdown_event) + continue + except Exception: + # Surface any unexpected error and terminate the producer. + shutdown_event.set() + + producer_thread = threading.Thread(target=producer, daemon=True) + producer_thread.start() + + try: + while not shutdown_event.is_set(): + try: + yield data_queue.get(block=True) + except Exception: + # If the producer already set the shutdown flag we exit. + if shutdown_event.is_set(): + break + finally: + shutdown_event.set() + # Drain the queue quickly to help the thread exit if it's blocked on `put`. + while not data_queue.empty(): + _ = data_queue.get_nowait() + # Give the producer thread a bit of time to finish. + producer_thread.join(timeout=1.0) + + def _get_naive_iterator(self, batch_size: int, queue_size: int = 2): + """ + Creates a simple non-threaded iterator that yields batches. + + Args: + batch_size (int): Size of batches to sample + queue_size (int): Number of initial batches to prefetch + + Yields: + BatchTransition: Batch transitions + """ + import collections + + queue = collections.deque() + + def enqueue(n): + for _ in range(n): + data = self.sample(batch_size) + queue.append(data) + + enqueue(queue_size) + while queue: + yield queue.popleft() + enqueue(1) + + @classmethod + def from_lerobot_dataset( + cls, + lerobot_dataset: LeRobotDataset, + device: str = "cuda:0", + state_keys: Sequence[str] | None = None, + capacity: int | None = None, + image_augmentation_function: Callable | None = None, + use_drq: bool = True, + storage_device: str = "cpu", + optimize_memory: bool = False, + ) -> "ReplayBuffer": + """ + Convert a LeRobotDataset into a ReplayBuffer. + + Args: + lerobot_dataset (LeRobotDataset): The dataset to convert. + device (str): The device for sampling tensors. Defaults to "cuda:0". + state_keys (Sequence[str] | None): The list of keys that appear in `state` and `next_state`. + capacity (int | None): Buffer capacity. 
If None, uses dataset length. + action_mask (Sequence[int] | None): Indices of action dimensions to keep. + image_augmentation_function (Callable | None): Function for image augmentation. + If None, uses default random shift with pad=4. + use_drq (bool): Whether to use DrQ image augmentation when sampling. + storage_device (str): Device for storing tensor data. Using "cpu" saves GPU memory. + optimize_memory (bool): If True, reduces memory usage by not duplicating state data. + + Returns: + ReplayBuffer: The replay buffer with dataset transitions. + """ + if capacity is None: + capacity = len(lerobot_dataset) + + if capacity < len(lerobot_dataset): + raise ValueError( + "The capacity of the ReplayBuffer must be greater than or equal to the length of the LeRobotDataset." + ) + + # Create replay buffer with image augmentation and DrQ settings + replay_buffer = cls( + capacity=capacity, + device=device, + state_keys=state_keys, + image_augmentation_function=image_augmentation_function, + use_drq=use_drq, + storage_device=storage_device, + optimize_memory=optimize_memory, + ) + + # Convert dataset to transitions + list_transition = cls._lerobotdataset_to_transitions(dataset=lerobot_dataset, state_keys=state_keys) + + # Initialize the buffer with the first transition to set up storage tensors + if list_transition: + first_transition = list_transition[0] + first_state = {k: v.to(device) for k, v in first_transition["state"].items()} + first_action = first_transition["action"].to(device) + + # Get complementary info if available + first_complementary_info = None + if ( + "complementary_info" in first_transition + and first_transition["complementary_info"] is not None + ): + first_complementary_info = { + k: v.to(device) for k, v in first_transition["complementary_info"].items() + } + + replay_buffer._initialize_storage( + state=first_state, action=first_action, complementary_info=first_complementary_info + ) + + # Fill the buffer with all transitions + for data in list_transition: + for k, v in data.items(): + if isinstance(v, dict): + for key, tensor in v.items(): + v[key] = tensor.to(storage_device) + elif isinstance(v, torch.Tensor): + data[k] = v.to(storage_device) + + action = data["action"] + + replay_buffer.add( + state=data["state"], + action=action, + reward=data["reward"], + next_state=data["next_state"], + done=data["done"], + truncated=False, # NOTE: Truncation are not supported yet in lerobot dataset + complementary_info=data.get("complementary_info", None), + ) + + return replay_buffer + + def to_lerobot_dataset( + self, + repo_id: str, + fps=1, + root=None, + task_name="from_replay_buffer", + ) -> LeRobotDataset: + """ + Converts all transitions in this ReplayBuffer into a single LeRobotDataset object. + """ + if self.size == 0: + raise ValueError("The replay buffer is empty. 
Cannot convert to a dataset.") + + # Create features dictionary for the dataset + features = { + "index": {"dtype": "int64", "shape": [1]}, # global index across episodes + "episode_index": {"dtype": "int64", "shape": [1]}, # which episode + "frame_index": {"dtype": "int64", "shape": [1]}, # index inside an episode + "timestamp": {"dtype": "float32", "shape": [1]}, # for now we store dummy + "task_index": {"dtype": "int64", "shape": [1]}, + } + + # Add "action" + sample_action = self.actions[0] + act_info = guess_feature_info(t=sample_action, name="action") + features["action"] = act_info + + # Add "reward" and "done" + features["next.reward"] = {"dtype": "float32", "shape": (1,)} + features["next.done"] = {"dtype": "bool", "shape": (1,)} + + # Add state keys + for key in self.states: + sample_val = self.states[key][0] + f_info = guess_feature_info(t=sample_val, name=key) + features[key] = f_info + + # Add complementary_info keys if available + if self.has_complementary_info: + for key in self.complementary_info_keys: + sample_val = self.complementary_info[key][0] + if isinstance(sample_val, torch.Tensor) and sample_val.ndim == 0: + sample_val = sample_val.unsqueeze(0) + f_info = guess_feature_info(t=sample_val, name=f"complementary_info.{key}") + features[f"complementary_info.{key}"] = f_info + + # Create an empty LeRobotDataset + lerobot_dataset = LeRobotDataset.create( + repo_id=repo_id, + fps=fps, + root=root, + robot_type=None, + features=features, + use_videos=True, + ) + + # Start writing images if needed + lerobot_dataset.start_image_writer(num_processes=0, num_threads=3) + + # Convert transitions into episodes and frames + episode_index = 0 + lerobot_dataset.episode_buffer = lerobot_dataset.create_episode_buffer(episode_index=episode_index) + + frame_idx_in_episode = 0 + for idx in range(self.size): + actual_idx = (self.position - self.size + idx) % self.capacity + + frame_dict = {} + + # Fill the data for state keys + for key in self.states: + frame_dict[key] = self.states[key][actual_idx].cpu() + + # Fill action, reward, done + frame_dict["action"] = self.actions[actual_idx].cpu() + frame_dict["next.reward"] = torch.tensor([self.rewards[actual_idx]], dtype=torch.float32).cpu() + frame_dict["next.done"] = torch.tensor([self.dones[actual_idx]], dtype=torch.bool).cpu() + + # Add complementary_info if available + if self.has_complementary_info: + for key in self.complementary_info_keys: + val = self.complementary_info[key][actual_idx] + # Convert tensors to CPU + if isinstance(val, torch.Tensor): + if val.ndim == 0: + val = val.unsqueeze(0) + frame_dict[f"complementary_info.{key}"] = val.cpu() + # Non-tensor values can be used directly + else: + frame_dict[f"complementary_info.{key}"] = val + + # Add to the dataset's buffer + lerobot_dataset.add_frame(frame_dict, task=task_name) + + # Move to next frame + frame_idx_in_episode += 1 + + # If we reached an episode boundary, call save_episode, reset counters + if self.dones[actual_idx] or self.truncateds[actual_idx]: + lerobot_dataset.save_episode() + episode_index += 1 + frame_idx_in_episode = 0 + lerobot_dataset.episode_buffer = lerobot_dataset.create_episode_buffer( + episode_index=episode_index + ) + + # Save any remaining frames in the buffer + if lerobot_dataset.episode_buffer["size"] > 0: + lerobot_dataset.save_episode() + + lerobot_dataset.stop_image_writer() + + return lerobot_dataset + + @staticmethod + def _lerobotdataset_to_transitions( + dataset: LeRobotDataset, + state_keys: Sequence[str] | None = None, + ) -> 
list[Transition]: + """ + Convert a LeRobotDataset into a list of RL (s, a, r, s', done) transitions. + + Args: + dataset (LeRobotDataset): + The dataset to convert. Each item in the dataset is expected to have + at least the following keys: + { + "action": ... + "next.reward": ... + "next.done": ... + "episode_index": ... + } + plus whatever your 'state_keys' specify. + + state_keys (Sequence[str] | None): + The dataset keys to include in 'state' and 'next_state'. Their names + will be kept as-is in the output transitions. E.g. + ["observation.state", "observation.environment_state"]. + If None, you must handle or define default keys. + + Returns: + transitions (List[Transition]): + A list of Transition dictionaries with the same length as `dataset`. + """ + if state_keys is None: + raise ValueError("State keys must be provided when converting LeRobotDataset to Transitions.") + + transitions = [] + num_frames = len(dataset) + + # Check if the dataset has "next.done" key + sample = dataset[0] + has_done_key = "next.done" in sample + + # Check for complementary_info keys + complementary_info_keys = [key for key in sample if key.startswith("complementary_info.")] + has_complementary_info = len(complementary_info_keys) > 0 + + # If not, we need to infer it from episode boundaries + if not has_done_key: + print("'next.done' key not found in dataset. Inferring from episode boundaries...") + + for i in tqdm(range(num_frames)): + current_sample = dataset[i] + + # ----- 1) Current state ----- + current_state: dict[str, torch.Tensor] = {} + for key in state_keys: + val = current_sample[key] + current_state[key] = val.unsqueeze(0) # Add batch dimension + + # ----- 2) Action ----- + action = current_sample["action"].unsqueeze(0) # Add batch dimension + + # ----- 3) Reward and done ----- + reward = float(current_sample["next.reward"].item()) # ensure float + + # Determine done flag - use next.done if available, otherwise infer from episode boundaries + if has_done_key: + done = bool(current_sample["next.done"].item()) # ensure bool + else: + # If this is the last frame or if next frame is in a different episode, mark as done + done = False + if i == num_frames - 1: + done = True + elif i < num_frames - 1: + next_sample = dataset[i + 1] + if next_sample["episode_index"] != current_sample["episode_index"]: + done = True + + # TODO: (azouitine) Handle truncation (using the same value as done for now) + truncated = done + + # ----- 4) Next state ----- + # If not done and the next sample is in the same episode, we pull the next sample's state. + # Otherwise (done=True or next sample crosses to a new episode), next_state = current_state. + next_state = current_state # default + if not done and (i < num_frames - 1): + next_sample = dataset[i + 1] + if next_sample["episode_index"] == current_sample["episode_index"]: + # Build next_state from the same keys + next_state_data: dict[str, torch.Tensor] = {} + for key in state_keys: + val = next_sample[key] + next_state_data[key] = val.unsqueeze(0) # Add batch dimension + next_state = next_state_data + + # ----- 5) Complementary info (if available) ----- + complementary_info = None + if has_complementary_info: + complementary_info = {} + for key in complementary_info_keys: + # Strip the "complementary_info." 
prefix to get the actual key + clean_key = key[len("complementary_info.") :] + val = current_sample[key] + # Handle tensor and non-tensor values differently + if isinstance(val, torch.Tensor): + complementary_info[clean_key] = val.unsqueeze(0) # Add batch dimension + else: + # TODO: (azouitine) Check if it's necessary to convert to tensor + # For non-tensor values, use directly + complementary_info[clean_key] = val + + # ----- Construct the Transition ----- + transition = Transition( + state=current_state, + action=action, + reward=reward, + next_state=next_state, + done=done, + truncated=truncated, + complementary_info=complementary_info, + ) + transitions.append(transition) + + return transitions + + +# Utility function to guess shapes/dtypes from a tensor +def guess_feature_info(t, name: str): + """ + Return a dictionary with the 'dtype' and 'shape' for a given tensor or scalar value. + If it looks like a 3D (C,H,W) shape, we might consider it an 'image'. + Otherwise default to appropriate dtype for numeric. + """ + + shape = tuple(t.shape) + # Basic guess: if we have exactly 3 dims and shape[0] in {1, 3}, guess 'image' + if len(shape) == 3 and shape[0] in [1, 3]: + return { + "dtype": "image", + "shape": shape, + } + else: + # Otherwise treat as numeric + return { + "dtype": "float32", + "shape": shape, + } + + +def concatenate_batch_transitions( + left_batch_transitions: BatchTransition, right_batch_transition: BatchTransition +) -> BatchTransition: + """ + Concatenates two BatchTransition objects into one. + + This function merges the right BatchTransition into the left one by concatenating + all corresponding tensors along dimension 0. The operation modifies the left_batch_transitions + in place and also returns it. + + Args: + left_batch_transitions (BatchTransition): The first batch to concatenate and the one + that will be modified in place. + right_batch_transition (BatchTransition): The second batch to append to the first one. + + Returns: + BatchTransition: The concatenated batch (same object as left_batch_transitions). + + Warning: + This function modifies the left_batch_transitions object in place. 
+ """ + # Concatenate state fields + left_batch_transitions["state"] = { + key: torch.cat( + [left_batch_transitions["state"][key], right_batch_transition["state"][key]], + dim=0, + ) + for key in left_batch_transitions["state"] + } + + # Concatenate basic fields + left_batch_transitions["action"] = torch.cat( + [left_batch_transitions["action"], right_batch_transition["action"]], dim=0 + ) + left_batch_transitions["reward"] = torch.cat( + [left_batch_transitions["reward"], right_batch_transition["reward"]], dim=0 + ) + + # Concatenate next_state fields + left_batch_transitions["next_state"] = { + key: torch.cat( + [left_batch_transitions["next_state"][key], right_batch_transition["next_state"][key]], + dim=0, + ) + for key in left_batch_transitions["next_state"] + } + + # Concatenate done and truncated fields + left_batch_transitions["done"] = torch.cat( + [left_batch_transitions["done"], right_batch_transition["done"]], dim=0 + ) + left_batch_transitions["truncated"] = torch.cat( + [left_batch_transitions["truncated"], right_batch_transition["truncated"]], + dim=0, + ) + + # Handle complementary_info + left_info = left_batch_transitions.get("complementary_info") + right_info = right_batch_transition.get("complementary_info") + + # Only process if right_info exists + if right_info is not None: + # Initialize left complementary_info if needed + if left_info is None: + left_batch_transitions["complementary_info"] = right_info + else: + # Concatenate each field + for key in right_info: + if key in left_info: + left_info[key] = torch.cat([left_info[key], right_info[key]], dim=0) + else: + left_info[key] = right_info[key] + + return left_batch_transitions diff --git a/lerobot/common/utils/import_utils.py b/lerobot/common/utils/import_utils.py index cd5f824502..5c29b5a847 100644 --- a/lerobot/common/utils/import_utils.py +++ b/lerobot/common/utils/import_utils.py @@ -28,6 +28,7 @@ def is_package_available(pkg_name: str, return_version: bool = False) -> tuple[b try: # Primary method to get the package version package_version = importlib.metadata.version(pkg_name) + except importlib.metadata.PackageNotFoundError: # Fallback method: Only for "torch" and versions containing "dev" if pkg_name == "torch": @@ -43,6 +44,9 @@ def is_package_available(pkg_name: str, return_version: bool = False) -> tuple[b except ImportError: # If the package can't be imported, it's not available package_exists = False + elif pkg_name == "grpc": + package = importlib.import_module(pkg_name) + package_version = getattr(package, "__version__", "N/A") else: # For packages other than "torch", don't attempt the fallback and set as not available package_exists = False diff --git a/lerobot/common/utils/process.py b/lerobot/common/utils/process.py new file mode 100644 index 0000000000..72438b6f98 --- /dev/null +++ b/lerobot/common/utils/process.py @@ -0,0 +1,83 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
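Stepping back to the `ReplayBuffer` introduced above in `buffer.py`: transitions are pushed one at a time with `add()` while training batches come from `sample()` or the infinite `get_iterator()`. Below is a minimal, hypothetical usage sketch; the state key, tensor shapes, and sizes are illustrative and not taken from the patch.

```python
# Editorial sketch; the state key, tensor shapes, and sizes are illustrative.
import torch

from lerobot.common.utils.buffer import ReplayBuffer

buffer = ReplayBuffer(
    capacity=10_000,
    device="cpu",
    storage_device="cpu",
    state_keys=["observation.state"],
    use_drq=False,
)

for _ in range(100):
    buffer.add(
        state={"observation.state": torch.randn(1, 6)},
        action=torch.randn(1, 4),
        reward=0.0,
        next_state={"observation.state": torch.randn(1, 6)},
        done=False,
        truncated=False,
    )

batch = buffer.sample(batch_size=32)   # BatchTransition with batched tensors
print(batch["action"].shape)           # torch.Size([32, 4])

iterator = buffer.get_iterator(batch_size=32, async_prefetch=False)
batch = next(iterator)                 # same sampling, via the infinite iterator
```

The same buffer can also be seeded from an offline dataset with `ReplayBuffer.from_lerobot_dataset(...)` and exported back with `to_lerobot_dataset(...)`, both defined above.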
+ +import logging +import os +import signal +import sys + + +class ProcessSignalHandler: + """Utility class to attach graceful shutdown signal handlers. + + The class exposes a shutdown_event attribute that is set when a shutdown + signal is received. A counter tracks how many shutdown signals have been + caught. On the second signal the process exits with status 1. + """ + + _SUPPORTED_SIGNALS = ("SIGINT", "SIGTERM", "SIGHUP", "SIGQUIT") + + def __init__(self, use_threads: bool, display_pid: bool = False): + # TODO: Check if we can use Event from threading since Event from + # multiprocessing is the a clone of threading.Event. + # https://docs.python.org/3/library/multiprocessing.html#multiprocessing.Event + if use_threads: + from threading import Event + else: + from multiprocessing import Event + + self.shutdown_event = Event() + self._counter: int = 0 + self._display_pid = display_pid + + self._register_handlers() + + @property + def counter(self) -> int: # pragma: no cover – simple accessor + """Number of shutdown signals that have been intercepted.""" + return self._counter + + def _register_handlers(self): + """Attach the internal _signal_handler to a subset of POSIX signals.""" + + def _signal_handler(signum, frame): + pid_str = "" + if self._display_pid: + pid_str = f"[PID: {os.getpid()}]" + logging.info(f"{pid_str} Shutdown signal {signum} received. Cleaning up…") + self.shutdown_event.set() + self._counter += 1 + + # On a second Ctrl-C (or any supported signal) force the exit to + # mimic the previous behaviour while giving the caller one chance to + # shutdown gracefully. + # TODO: Investigate if we need it later + if self._counter > 1: + logging.info("Force shutdown") + sys.exit(1) + + for sig_name in self._SUPPORTED_SIGNALS: + sig = getattr(signal, sig_name, None) + if sig is None: + # The signal is not available on this platform (Windows for + # instance does not provide SIGHUP, SIGQUIT…). Skip it. + continue + try: + signal.signal(sig, _signal_handler) + except (ValueError, OSError): # pragma: no cover – unlikely but safe + # Signal not supported or we are in a non-main thread. + continue diff --git a/lerobot/common/utils/queue.py b/lerobot/common/utils/queue.py new file mode 100644 index 0000000000..ceb30e2bff --- /dev/null +++ b/lerobot/common/utils/queue.py @@ -0,0 +1,39 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
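The `ProcessSignalHandler` defined above is meant to be created once near process start; worker loops then poll its `shutdown_event` to exit cleanly on SIGINT/SIGTERM. A hypothetical sketch (the loop body is illustrative):

```python
# Editorial sketch; the worker loop body is illustrative.
import logging
import time

from lerobot.common.utils.process import ProcessSignalHandler

logging.basicConfig(level=logging.INFO)

handler = ProcessSignalHandler(use_threads=True, display_pid=True)

# Keep working until SIGINT/SIGTERM (or another supported signal) is caught.
while not handler.shutdown_event.is_set():
    time.sleep(0.1)  # placeholder for real work

logging.info("Exited cleanly after %d shutdown signal(s).", handler.counter)
```

The `get_last_item_from_queue` helper defined just below serves a related purpose: it drains a `torch.multiprocessing` queue and keeps only the most recent item, for example the freshest policy parameters.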
+ +from queue import Empty +from typing import Any + +from torch.multiprocessing import Queue + + +def get_last_item_from_queue(queue: Queue, block=True, timeout: float = 0.1) -> Any: + if block: + try: + item = queue.get(timeout=timeout) + except Empty: + return None + else: + item = None + + # Drain queue and keep only the most recent parameters + try: + while True: + item = queue.get_nowait() + except Empty: + pass + + return item diff --git a/lerobot/common/utils/transition.py b/lerobot/common/utils/transition.py new file mode 100644 index 0000000000..db413c388f --- /dev/null +++ b/lerobot/common/utils/transition.py @@ -0,0 +1,85 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import TypedDict + +import torch + + +class Transition(TypedDict): + state: dict[str, torch.Tensor] + action: torch.Tensor + reward: float + next_state: dict[str, torch.Tensor] + done: bool + truncated: bool + complementary_info: dict[str, torch.Tensor | float | int] | None = None + + +def move_transition_to_device(transition: Transition, device: str = "cpu") -> Transition: + device = torch.device(device) + non_blocking = device.type == "cuda" + + # Move state tensors to device + transition["state"] = { + key: val.to(device, non_blocking=non_blocking) for key, val in transition["state"].items() + } + + # Move action to device + transition["action"] = transition["action"].to(device, non_blocking=non_blocking) + + # Move reward and done if they are tensors + if isinstance(transition["reward"], torch.Tensor): + transition["reward"] = transition["reward"].to(device, non_blocking=non_blocking) + + if isinstance(transition["done"], torch.Tensor): + transition["done"] = transition["done"].to(device, non_blocking=non_blocking) + + if isinstance(transition["truncated"], torch.Tensor): + transition["truncated"] = transition["truncated"].to(device, non_blocking=non_blocking) + + # Move next_state tensors to device + transition["next_state"] = { + key: val.to(device, non_blocking=non_blocking) for key, val in transition["next_state"].items() + } + + # Move complementary_info tensors if present + if transition.get("complementary_info") is not None: + for key, val in transition["complementary_info"].items(): + if isinstance(val, torch.Tensor): + transition["complementary_info"][key] = val.to(device, non_blocking=non_blocking) + elif isinstance(val, (int, float, bool)): + transition["complementary_info"][key] = torch.tensor(val, device=device) + else: + raise ValueError(f"Unsupported type {type(val)} for complementary_info[{key}]") + return transition + + +def move_state_dict_to_device(state_dict, device="cpu"): + """ + Recursively move all tensors in a (potentially) nested + dict/list/tuple structure to the CPU. 
+ """ + if isinstance(state_dict, torch.Tensor): + return state_dict.to(device) + elif isinstance(state_dict, dict): + return {k: move_state_dict_to_device(v, device=device) for k, v in state_dict.items()} + elif isinstance(state_dict, list): + return [move_state_dict_to_device(v, device=device) for v in state_dict] + elif isinstance(state_dict, tuple): + return tuple(move_state_dict_to_device(v, device=device) for v in state_dict) + else: + return state_dict diff --git a/lerobot/common/utils/utils.py b/lerobot/common/utils/utils.py index 08e9a3c06b..cba65ba456 100644 --- a/lerobot/common/utils/utils.py +++ b/lerobot/common/utils/utils.py @@ -20,9 +20,11 @@ import select import subprocess import sys -from copy import copy +import time +from copy import copy, deepcopy from datetime import datetime, timezone from pathlib import Path +from statistics import mean import numpy as np import torch @@ -109,11 +111,17 @@ def is_amp_available(device: str): raise ValueError(f"Unknown device '{device}.") -def init_logging(): +def init_logging(log_file: Path | None = None, display_pid: bool = False): def custom_format(record): dt = datetime.now().strftime("%Y-%m-%d %H:%M:%S") fnameline = f"{record.pathname}:{record.lineno}" - message = f"{record.levelname} {dt} {fnameline[-15:]:>15} {record.msg}" + + # NOTE: Display PID is useful for multi-process logging. + if display_pid: + pid_str = f"[PID: {os.getpid()}]" + message = f"{record.levelname} {pid_str} {dt} {fnameline[-15:]:>15} {record.msg}" + else: + message = f"{record.levelname} {dt} {fnameline[-15:]:>15} {record.msg}" return message logging.basicConfig(level=logging.INFO) @@ -127,6 +135,12 @@ def custom_format(record): console_handler.setFormatter(formatter) logging.getLogger().addHandler(console_handler) + if log_file is not None: + # Additionally write logs to file + file_handler = logging.FileHandler(log_file) + file_handler.setFormatter(formatter) + logging.getLogger().addHandler(file_handler) + def format_big_number(num, precision=0): suffixes = ["", "K", "M", "B", "T", "Q"] @@ -247,3 +261,114 @@ def enter_pressed() -> bool: def move_cursor_up(lines): """Move the cursor up by a specified number of lines.""" print(f"\033[{lines}A", end="") + + +class TimerManager: + """ + Lightweight utility to measure elapsed time. 
+ + Examples + -------- + ```python + # Example 1: Using context manager + timer = TimerManager("Policy", log=False) + for _ in range(3): + with timer: + time.sleep(0.01) + print(timer.last, timer.fps_avg, timer.percentile(90)) # Prints: 0.01 100.0 0.01 + ``` + + ```python + # Example 2: Using start/stop methods + timer = TimerManager("Policy", log=False) + timer.start() + time.sleep(0.01) + timer.stop() + print(timer.last, timer.fps_avg, timer.percentile(90)) # Prints: 0.01 100.0 0.01 + ``` + """ + + def __init__( + self, + label: str = "Elapsed-time", + log: bool = True, + logger: logging.Logger | None = None, + ): + self.label = label + self.log = log + self.logger = logger + self._start: float | None = None + self._history: list[float] = [] + + def __enter__(self): + return self.start() + + def __exit__(self, exc_type, exc_val, exc_tb): + self.stop() + + def start(self): + self._start = time.perf_counter() + return self + + def stop(self) -> float: + if self._start is None: + raise RuntimeError("Timer was never started.") + elapsed = time.perf_counter() - self._start + self._history.append(elapsed) + self._start = None + if self.log: + if self.logger is not None: + self.logger.info(f"{self.label}: {elapsed:.6f} s") + else: + logging.info(f"{self.label}: {elapsed:.6f} s") + return elapsed + + def reset(self): + self._history.clear() + + @property + def last(self) -> float: + return self._history[-1] if self._history else 0.0 + + @property + def avg(self) -> float: + return mean(self._history) if self._history else 0.0 + + @property + def total(self) -> float: + return sum(self._history) + + @property + def count(self) -> int: + return len(self._history) + + @property + def history(self) -> list[float]: + return deepcopy(self._history) + + @property + def fps_history(self) -> list[float]: + return [1.0 / t for t in self._history] + + @property + def fps_last(self) -> float: + return 0.0 if self.last == 0 else 1.0 / self.last + + @property + def fps_avg(self) -> float: + return 0.0 if self.avg == 0 else 1.0 / self.avg + + def percentile(self, p: float) -> float: + """ + Return the p-th percentile of recorded times. + """ + if not self._history: + return 0.0 + return float(np.percentile(self._history, p)) + + def fps_percentile(self, p: float) -> float: + """ + FPS corresponding to the p-th percentile time. + """ + val = self.percentile(p) + return 0.0 if val == 0 else 1.0 / val diff --git a/lerobot/common/utils/wandb_utils.py b/lerobot/common/utils/wandb_utils.py index 9e938e1917..ac4d223433 100644 --- a/lerobot/common/utils/wandb_utils.py +++ b/lerobot/common/utils/wandb_utils.py @@ -30,9 +30,10 @@ def cfg_to_group(cfg: TrainPipelineConfig, return_list: bool = False) -> list[st """Return a group name for logging. Optionally returns group name as list.""" lst = [ f"policy:{cfg.policy.type}", - f"dataset:{cfg.dataset.repo_id}", f"seed:{cfg.seed}", ] + if cfg.dataset is not None: + lst.append(f"dataset:{cfg.dataset.repo_id}") if cfg.env is not None: lst.append(f"env:{cfg.env.type}") return lst if return_list else "-".join(lst) @@ -92,6 +93,12 @@ def __init__(self, cfg: TrainPipelineConfig): resume="must" if cfg.resume else None, mode=self.cfg.mode if self.cfg.mode in ["online", "offline", "disabled"] else "online", ) + run_id = wandb.run.id + # NOTE: We will override the cfg.wandb.run_id with the wandb run id. + # This is because we want to be able to resume the run from the wandb run id. + cfg.wandb.run_id = run_id + # Handle custom step key for rl asynchronous training. 
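+        # Illustrative example (made-up values): a later call such as
+        #   self.log_dict({"loss": 0.3, "interaction_step": 1200}, mode="train", custom_step_key="interaction_step")
+        # logs "train/loss" together with "train/interaction_step" in one wandb.log call,
+        # so interaction steps, optimization steps, and evaluation steps can each advance
+        # on their own counter without breaking wandb's monotonic global step.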
+ self._wandb_custom_step_key: set[str] | None = None print(colored("Logs will be synced with wandb.", "blue", attrs=["bold"])) logging.info(f"Track this run --> {colored(wandb.run.get_url(), 'yellow', attrs=['bold'])}") self._wandb = wandb @@ -108,9 +115,26 @@ def log_policy(self, checkpoint_dir: Path): artifact.add_file(checkpoint_dir / PRETRAINED_MODEL_DIR / SAFETENSORS_SINGLE_FILE) self._wandb.log_artifact(artifact) - def log_dict(self, d: dict, step: int, mode: str = "train"): + def log_dict( + self, d: dict, step: int | None = None, mode: str = "train", custom_step_key: str | None = None + ): if mode not in {"train", "eval"}: raise ValueError(mode) + if step is None and custom_step_key is None: + raise ValueError("Either step or custom_step_key must be provided.") + + # NOTE: This is not simple. Wandb step must always monotonically increase and it + # increases with each wandb.log call, but in the case of asynchronous RL for example, + # multiple time steps is possible. For example, the interaction step with the environment, + # the training step, the evaluation step, etc. So we need to define a custom step key + # to log the correct step for each metric. + if custom_step_key is not None: + if self._wandb_custom_step_key is None: + self._wandb_custom_step_key = set() + new_custom_key = f"{mode}/{custom_step_key}" + if new_custom_key not in self._wandb_custom_step_key: + self._wandb_custom_step_key.add(new_custom_key) + self._wandb.define_metric(new_custom_key, hidden=True) for k, v in d.items(): if not isinstance(v, (int, float, str)): @@ -118,7 +142,18 @@ def log_dict(self, d: dict, step: int, mode: str = "train"): f'WandB logging of key "{k}" was ignored as its type "{type(v)}" is not handled by this wrapper.' ) continue - self._wandb.log({f"{mode}/{k}": v}, step=step) + + # Do not log the custom step key itself. + if self._wandb_custom_step_key is not None and k in self._wandb_custom_step_key: + continue + + if custom_step_key is not None: + value_custom_step = d[custom_step_key] + data = {f"{mode}/{k}": v, f"{mode}/{custom_step_key}": value_custom_step} + self._wandb.log(data) + continue + + self._wandb.log(data={f"{mode}/{k}": v}, step=step) def log_video(self, video_path: str, step: int, mode: str = "train"): if mode not in {"train", "eval"}: diff --git a/lerobot/configs/control.py b/lerobot/configs/control.py deleted file mode 100644 index 07b8d13523..0000000000 --- a/lerobot/configs/control.py +++ /dev/null @@ -1,134 +0,0 @@ -# Copyright 2024 The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from dataclasses import dataclass -from pathlib import Path - -import draccus - -from lerobot.common.robots import RobotConfig -from lerobot.configs import parser -from lerobot.configs.policies import PreTrainedConfig - - -@dataclass -class ControlConfig(draccus.ChoiceRegistry): - pass - - -@ControlConfig.register_subclass("calibrate") -@dataclass -class CalibrateControlConfig(ControlConfig): - # List of arms to calibrate (e.g. 
`--arms='["left_follower","right_follower"]' left_leader`) - arms: list[str] | None = None - - -@ControlConfig.register_subclass("teleoperate") -@dataclass -class TeleoperateControlConfig(ControlConfig): - # Limit the maximum frames per second. By default, no limit. - fps: int | None = None - teleop_time_s: float | None = None - # Display all cameras on screen - display_data: bool = False - - -@ControlConfig.register_subclass("record") -@dataclass -class RecordControlConfig(ControlConfig): - # Dataset identifier. By convention it should match '{hf_username}/{dataset_name}' (e.g. `lerobot/test`). - repo_id: str - # A short but accurate description of the task performed during the recording (e.g. "Pick the Lego block and drop it in the box on the right.") - single_task: str - # Root directory where the dataset will be stored (e.g. 'dataset/path'). - root: str | Path | None = None - policy: PreTrainedConfig | None = None - # Limit the frames per second. By default, uses the policy fps. - fps: int | None = None - # Number of seconds before starting data collection. It allows the robot devices to warmup and synchronize. - warmup_time_s: int | float = 10 - # Number of seconds for data recording for each episode. - episode_time_s: int | float = 60 - # Number of seconds for resetting the environment after each episode. - reset_time_s: int | float = 60 - # Number of episodes to record. - num_episodes: int = 50 - # Encode frames in the dataset into video - video: bool = True - # Upload dataset to Hugging Face hub. - push_to_hub: bool = True - # Upload on private repository on the Hugging Face hub. - private: bool = False - # Add tags to your dataset on the hub. - tags: list[str] | None = None - # Number of subprocesses handling the saving of frames as PNG. Set to 0 to use threads only; - # set to ≥1 to use subprocesses, each using threads to write images. The best number of processes - # and threads depends on your system. We recommend 4 threads per camera with 0 processes. - # If fps is unstable, adjust the thread count. If still unstable, try using 1 or more subprocesses. - num_image_writer_processes: int = 0 - # Number of threads writing the frames as png images on disk, per camera. - # Too many threads might cause unstable teleoperation fps due to main thread being blocked. - # Not enough threads might cause low camera fps. - num_image_writer_threads_per_camera: int = 4 - # Display all cameras on screen - display_data: bool = False - # Use vocal synthesis to read events. - play_sounds: bool = True - # Resume recording on an existing dataset. - resume: bool = False - - def __post_init__(self): - # HACK: We parse again the cli args here to get the pretrained path if there was one. - policy_path = parser.get_path_arg("control.policy") - if policy_path: - cli_overrides = parser.get_cli_overrides("control.policy") - self.policy = PreTrainedConfig.from_pretrained(policy_path, cli_overrides=cli_overrides) - self.policy.pretrained_path = policy_path - - -@ControlConfig.register_subclass("replay") -@dataclass -class ReplayControlConfig(ControlConfig): - # Dataset identifier. By convention it should match '{hf_username}/{dataset_name}' (e.g. `lerobot/test`). - repo_id: str - # Index of the episode to replay. - episode: int - # Root directory where the dataset will be stored (e.g. 'dataset/path'). - root: str | Path | None = None - # Limit the frames per second. By default, uses the dataset fps. - fps: int | None = None - # Use vocal synthesis to read events. 
- play_sounds: bool = True - - -@ControlConfig.register_subclass("remote_robot") -@dataclass -class RemoteRobotConfig(ControlConfig): - log_interval: int = 100 - # Display all cameras on screen - display_data: bool = False - # Rerun configuration for remote robot (https://ref.rerun.io/docs/python/0.22.1/common/initialization_functions/#rerun.connect_tcp) - viewer_ip: str | None = None - viewer_port: str | None = None - - -@dataclass -class ControlPipelineConfig: - robot: RobotConfig - control: ControlConfig - - @classmethod - def __get_path_fields__(cls) -> list[str]: - """This enables the parser to load config from the policy using `--policy.path=local/dir`""" - return ["control.policy"] diff --git a/lerobot/configs/train.py b/lerobot/configs/train.py index 98826294ea..96a460bdf1 100644 --- a/lerobot/configs/train.py +++ b/lerobot/configs/train.py @@ -172,3 +172,8 @@ def from_pretrained( cli_args = kwargs.pop("cli_args", []) with draccus.config_type("json"): return draccus.parse(cls, config_file, args=cli_args) + + +@dataclass(kw_only=True) +class TrainRLServerPipelineConfig(TrainPipelineConfig): + dataset: DatasetConfig | None = None # NOTE: In RL, we don't need an offline dataset diff --git a/lerobot/configs/types.py b/lerobot/configs/types.py index 6b3d92e80d..6040ff70ba 100644 --- a/lerobot/configs/types.py +++ b/lerobot/configs/types.py @@ -23,6 +23,7 @@ class FeatureType(str, Enum): VISUAL = "VISUAL" ENV = "ENV" ACTION = "ACTION" + REWARD = "REWARD" class NormalizationMode(str, Enum): diff --git a/lerobot/scripts/find_joint_limits.py b/lerobot/scripts/find_joint_limits.py new file mode 100644 index 0000000000..95676dd359 --- /dev/null +++ b/lerobot/scripts/find_joint_limits.py @@ -0,0 +1,118 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Simple script to control a robot from teleoperation. + +Example: + +```shell +python -m lerobot.scripts.server.find_joint_limits \ + --robot.type=so100_follower \ + --robot.port=/dev/tty.usbmodem58760431541 \ + --robot.id=black \ + --teleop.type=so100_leader \ + --teleop.port=/dev/tty.usbmodem58760431551 \ + --teleop.id=blue +``` +""" + +import time +from dataclasses import dataclass + +import draccus +import numpy as np + +from lerobot.common.model.kinematics import RobotKinematics +from lerobot.common.robots import ( # noqa: F401 + RobotConfig, + koch_follower, + make_robot_from_config, + so100_follower, +) +from lerobot.common.teleoperators import ( # noqa: F401 + TeleoperatorConfig, + gamepad, + koch_leader, + make_teleoperator_from_config, + so100_leader, +) + + +@dataclass +class FindJointLimitsConfig: + teleop: TeleoperatorConfig + robot: RobotConfig + # Limit the maximum frames per second. By default, no limit. 
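+    # Note: the field below is the teleoperation duration in seconds, not an fps limit.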
+ teleop_time_s: float = 30 + # Display all cameras on screen + display_data: bool = False + + +@draccus.wrap() +def find_joint_and_ee_bounds(cfg: FindJointLimitsConfig): + teleop = make_teleoperator_from_config(cfg.teleop) + robot = make_robot_from_config(cfg.robot) + + teleop.connect() + robot.connect() + + start_episode_t = time.perf_counter() + robot_type = getattr(robot.config, "robot_type", "so101") + if "so100" in robot_type or "so101" in robot_type: + # Note to be compatible with the rest of the codebase, + # we are using the new calibration method for so101 and so100 + robot_type = "so_new_calibration" + kinematics = RobotKinematics(robot_type=robot_type) + + # Initialize min/max values + observation = robot.get_observation() + joint_positions = np.array([observation[f"{key}.pos"] for key in robot.bus.motors]) + ee_pos = kinematics.forward_kinematics(joint_positions, frame="gripper_tip")[:3, 3] + + max_pos = joint_positions.copy() + min_pos = joint_positions.copy() + max_ee = ee_pos.copy() + min_ee = ee_pos.copy() + + while True: + action = teleop.get_action() + robot.send_action(action) + + observation = robot.get_observation() + joint_positions = np.array([observation[f"{key}.pos"] for key in robot.bus.motors]) + ee_pos = kinematics.forward_kinematics(joint_positions, frame="gripper_tip")[:3, 3] + + # Skip initial warmup period + if (time.perf_counter() - start_episode_t) < 5: + continue + + # Update min/max values + max_ee = np.maximum(max_ee, ee_pos) + min_ee = np.minimum(min_ee, ee_pos) + max_pos = np.maximum(max_pos, joint_positions) + min_pos = np.minimum(min_pos, joint_positions) + + if time.perf_counter() - start_episode_t > cfg.teleop_time_s: + print(f"Max ee position {np.round(max_ee, 4).tolist()}") + print(f"Min ee position {np.round(min_ee, 4).tolist()}") + print(f"Max joint pos position {np.round(max_pos, 4).tolist()}") + print(f"Min joint pos position {np.round(min_pos, 4).tolist()}") + break + + +if __name__ == "__main__": + find_joint_and_ee_bounds() diff --git a/lerobot/scripts/rl/actor.py b/lerobot/scripts/rl/actor.py new file mode 100644 index 0000000000..da24d0dc58 --- /dev/null +++ b/lerobot/scripts/rl/actor.py @@ -0,0 +1,709 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Actor server runner for distributed HILSerl robot policy training. + +This script implements the actor component of the distributed HILSerl architecture. +It executes the policy in the robot environment, collects experience, +and sends transitions to the learner server for policy updates. + +Examples of usage: + +- Start an actor server for real robot training with human-in-the-loop intervention: +```bash +python lerobot/scripts/rl/actor.py --config_path lerobot/configs/train_config_hilserl_so100.json +``` + +**NOTE**: The actor server requires a running learner server to connect to. Ensure the learner +server is started before launching the actor. 
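+
+For example (a sketch only: the learner entry point is an assumption here, adapt the paths to your
+setup), start the learner first and then the actor with the same configuration:
+```bash
+python lerobot/scripts/rl/learner.py --config_path lerobot/configs/train_config_hilserl_so100.json
+```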
+ +**NOTE**: Human intervention is key to HILSerl training. Press the upper right trigger button on the +gamepad to take control of the robot during training. Initially intervene frequently, then gradually +reduce interventions as the policy improves. + +**WORKFLOW**: +1. Determine robot workspace bounds using `find_joint_limits.py` +2. Record demonstrations with `gym_manipulator.py` in record mode +3. Process the dataset and determine camera crops with `crop_dataset_roi.py` +4. Start the learner server with the training configuration +5. Start this actor server with the same configuration +6. Use human interventions to guide policy learning + +For more details on the complete HILSerl training workflow, see: +https://github.com/michel-aractingi/lerobot-hilserl-guide +""" + +import logging +import os +import time +from functools import lru_cache +from queue import Empty + +import grpc +import torch +from torch import nn +from torch.multiprocessing import Event, Queue + +from lerobot.common.cameras import opencv # noqa: F401 +from lerobot.common.policies.factory import make_policy +from lerobot.common.policies.sac.modeling_sac import SACPolicy +from lerobot.common.robots import so100_follower # noqa: F401 +from lerobot.common.teleoperators import gamepad, so101_leader # noqa: F401 +from lerobot.common.transport import services_pb2, services_pb2_grpc +from lerobot.common.transport.utils import ( + bytes_to_state_dict, + python_object_to_bytes, + receive_bytes_in_chunks, + send_bytes_in_chunks, + transitions_to_bytes, +) +from lerobot.common.utils.process import ProcessSignalHandler +from lerobot.common.utils.queue import get_last_item_from_queue +from lerobot.common.utils.random_utils import set_seed +from lerobot.common.utils.robot_utils import busy_wait +from lerobot.common.utils.transition import ( + Transition, + move_state_dict_to_device, + move_transition_to_device, +) +from lerobot.common.utils.utils import ( + TimerManager, + get_safe_torch_device, + init_logging, +) +from lerobot.configs import parser +from lerobot.configs.train import TrainRLServerPipelineConfig +from lerobot.scripts.rl import learner_service +from lerobot.scripts.rl.gym_manipulator import make_robot_env + +ACTOR_SHUTDOWN_TIMEOUT = 30 + + +################################################# +# Main entry point # +################################################# + + +@parser.wrap() +def actor_cli(cfg: TrainRLServerPipelineConfig): + cfg.validate() + display_pid = False + if not use_threads(cfg): + import torch.multiprocessing as mp + + mp.set_start_method("spawn") + display_pid = True + + # Create logs directory to ensure it exists + log_dir = os.path.join(cfg.output_dir, "logs") + os.makedirs(log_dir, exist_ok=True) + log_file = os.path.join(log_dir, f"actor_{cfg.job_name}.log") + + # Initialize logging with explicit log file + init_logging(log_file=log_file, display_pid=display_pid) + logging.info(f"Actor logging initialized, writing to {log_file}") + + is_threaded = use_threads(cfg) + shutdown_event = ProcessSignalHandler(is_threaded, display_pid=display_pid).shutdown_event + + learner_client, grpc_channel = learner_service_client( + host=cfg.policy.actor_learner_config.learner_host, + port=cfg.policy.actor_learner_config.learner_port, + ) + + logging.info("[ACTOR] Establishing connection with Learner") + if not establish_learner_connection(learner_client, shutdown_event): + logging.error("[ACTOR] Failed to establish connection with Learner") + return + + if not use_threads(cfg): + # If we use multithreading, we 
can reuse the channel + grpc_channel.close() + grpc_channel = None + + logging.info("[ACTOR] Connection with Learner established") + + parameters_queue = Queue() + transitions_queue = Queue() + interactions_queue = Queue() + + concurrency_entity = None + if use_threads(cfg): + from threading import Thread + + concurrency_entity = Thread + else: + from multiprocessing import Process + + concurrency_entity = Process + + receive_policy_process = concurrency_entity( + target=receive_policy, + args=(cfg, parameters_queue, shutdown_event, grpc_channel), + daemon=True, + ) + + transitions_process = concurrency_entity( + target=send_transitions, + args=(cfg, transitions_queue, shutdown_event, grpc_channel), + daemon=True, + ) + + interactions_process = concurrency_entity( + target=send_interactions, + args=(cfg, interactions_queue, shutdown_event, grpc_channel), + daemon=True, + ) + + transitions_process.start() + interactions_process.start() + receive_policy_process.start() + + act_with_policy( + cfg=cfg, + shutdown_event=shutdown_event, + parameters_queue=parameters_queue, + transitions_queue=transitions_queue, + interactions_queue=interactions_queue, + ) + logging.info("[ACTOR] Policy process joined") + + logging.info("[ACTOR] Closing queues") + transitions_queue.close() + interactions_queue.close() + parameters_queue.close() + + transitions_process.join() + logging.info("[ACTOR] Transitions process joined") + interactions_process.join() + logging.info("[ACTOR] Interactions process joined") + receive_policy_process.join() + logging.info("[ACTOR] Receive policy process joined") + + logging.info("[ACTOR] join queues") + transitions_queue.cancel_join_thread() + interactions_queue.cancel_join_thread() + parameters_queue.cancel_join_thread() + + logging.info("[ACTOR] queues closed") + + +################################################# +# Core algorithm functions # +################################################# + + +def act_with_policy( + cfg: TrainRLServerPipelineConfig, + shutdown_event: any, # Event, + parameters_queue: Queue, + transitions_queue: Queue, + interactions_queue: Queue, +): + """ + Executes policy interaction within the environment. + + This function rolls out the policy in the environment, collecting interaction data and pushing it to a queue for streaming to the learner. + Once an episode is completed, updated network parameters received from the learner are retrieved from a queue and loaded into the network. + + Args: + cfg: Configuration settings for the interaction process. + shutdown_event: Event to check if the process should shutdown. + parameters_queue: Queue to receive updated network parameters from the learner. + transitions_queue: Queue to send transitions to the learner. + interactions_queue: Queue to send interactions to the learner. 
+ """ + # Initialize logging for multiprocessing + if not use_threads(cfg): + log_dir = os.path.join(cfg.output_dir, "logs") + os.makedirs(log_dir, exist_ok=True) + log_file = os.path.join(log_dir, f"actor_policy_{os.getpid()}.log") + init_logging(log_file=log_file, display_pid=True) + logging.info("Actor policy process logging initialized") + + logging.info("make_env online") + + online_env = make_robot_env(cfg=cfg.env) + + set_seed(cfg.seed) + device = get_safe_torch_device(cfg.policy.device, log=True) + + torch.backends.cudnn.benchmark = True + torch.backends.cuda.matmul.allow_tf32 = True + + logging.info("make_policy") + + ### Instantiate the policy in both the actor and learner processes + ### To avoid sending a SACPolicy object through the port, we create a policy instance + ### on both sides, the learner sends the updated parameters every n steps to update the actor's parameters + policy: SACPolicy = make_policy( + cfg=cfg.policy, + env_cfg=cfg.env, + ) + policy = policy.eval() + assert isinstance(policy, nn.Module) + + obs, info = online_env.reset() + + # NOTE: For the moment we will solely handle the case of a single environment + sum_reward_episode = 0 + list_transition_to_send_to_learner = [] + episode_intervention = False + # Add counters for intervention rate calculation + episode_intervention_steps = 0 + episode_total_steps = 0 + + policy_timer = TimerManager("Policy inference", log=False) + + for interaction_step in range(cfg.policy.online_steps): + start_time = time.perf_counter() + if shutdown_event.is_set(): + logging.info("[ACTOR] Shutting down act_with_policy") + return + + if interaction_step >= cfg.policy.online_step_before_learning: + # Time policy inference and check if it meets FPS requirement + with policy_timer: + action = policy.select_action(batch=obs) + policy_fps = policy_timer.fps_last + + log_policy_frequency_issue(policy_fps=policy_fps, cfg=cfg, interaction_step=interaction_step) + + else: + action = online_env.action_space.sample() + + next_obs, reward, done, truncated, info = online_env.step(action) + + sum_reward_episode += float(reward) + # Increment total steps counter for intervention rate + episode_total_steps += 1 + + # NOTE: We override the action if the intervention is True, because the action applied is the intervention action + if "is_intervention" in info and info["is_intervention"]: + # NOTE: The action space for demonstration before hand is with the full action space + # but sometimes for example we want to deactivate the gripper + action = info["action_intervention"] + episode_intervention = True + # Increment intervention steps counter + episode_intervention_steps += 1 + + list_transition_to_send_to_learner.append( + Transition( + state=obs, + action=action, + reward=reward, + next_state=next_obs, + done=done, + truncated=truncated, # TODO: (azouitine) Handle truncation properly + complementary_info=info, + ) + ) + # assign obs to the next obs and continue the rollout + obs = next_obs + + if done or truncated: + logging.info(f"[ACTOR] Global step {interaction_step}: Episode reward: {sum_reward_episode}") + + update_policy_parameters(policy=policy.actor, parameters_queue=parameters_queue, device=device) + + if len(list_transition_to_send_to_learner) > 0: + push_transitions_to_transport_queue( + transitions=list_transition_to_send_to_learner, + transitions_queue=transitions_queue, + ) + list_transition_to_send_to_learner = [] + + stats = get_frequency_stats(policy_timer) + policy_timer.reset() + + # Calculate intervention rate + 
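+            # For example (illustrative numbers): 12 intervention steps out of 200 total
+            # steps gives an intervention rate of 0.06.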
intervention_rate = 0.0 + if episode_total_steps > 0: + intervention_rate = episode_intervention_steps / episode_total_steps + + # Send episodic reward to the learner + interactions_queue.put( + python_object_to_bytes( + { + "Episodic reward": sum_reward_episode, + "Interaction step": interaction_step, + "Episode intervention": int(episode_intervention), + "Intervention rate": intervention_rate, + **stats, + } + ) + ) + + # Reset intervention counters + sum_reward_episode = 0.0 + episode_intervention = False + episode_intervention_steps = 0 + episode_total_steps = 0 + obs, info = online_env.reset() + + if cfg.env.fps is not None: + dt_time = time.perf_counter() - start_time + busy_wait(1 / cfg.env.fps - dt_time) + + +################################################# +# Communication Functions - Group all gRPC/messaging functions # +################################################# + + +def establish_learner_connection( + stub: services_pb2_grpc.LearnerServiceStub, + shutdown_event: Event, # type: ignore + attempts: int = 30, +): + """Establish a connection with the learner. + + Args: + stub (services_pb2_grpc.LearnerServiceStub): The stub to use for the connection. + shutdown_event (Event): The event to check if the connection should be established. + attempts (int): The number of attempts to establish the connection. + Returns: + bool: True if the connection is established, False otherwise. + """ + for _ in range(attempts): + if shutdown_event.is_set(): + logging.info("[ACTOR] Shutting down establish_learner_connection") + return False + + # Force a connection attempt and check state + try: + logging.info("[ACTOR] Send ready message to Learner") + if stub.Ready(services_pb2.Empty()) == services_pb2.Empty(): + return True + except grpc.RpcError as e: + logging.error(f"[ACTOR] Waiting for Learner to be ready... {e}") + time.sleep(2) + return False + + +@lru_cache(maxsize=1) +def learner_service_client( + host: str = "127.0.0.1", + port: int = 50051, +) -> tuple[services_pb2_grpc.LearnerServiceStub, grpc.Channel]: + import json + + """ + Returns a client for the learner service. + + GRPC uses HTTP/2, which is a binary protocol and multiplexes requests over a single connection. + So we need to create only one client and reuse it. + """ + + service_config = { + "methodConfig": [ + { + "name": [{}], # Applies to ALL methods in ALL services + "retryPolicy": { + "maxAttempts": 5, # Max retries (total attempts = 5) + "initialBackoff": "0.1s", # First retry after 0.1s + "maxBackoff": "2s", # Max wait time between retries + "backoffMultiplier": 2, # Exponential backoff factor + "retryableStatusCodes": [ + "UNAVAILABLE", + "DEADLINE_EXCEEDED", + ], # Retries on network failures + }, + } + ] + } + + service_config_json = json.dumps(service_config) + + channel = grpc.insecure_channel( + f"{host}:{port}", + options=[ + ("grpc.max_receive_message_length", learner_service.MAX_MESSAGE_SIZE), + ("grpc.max_send_message_length", learner_service.MAX_MESSAGE_SIZE), + ("grpc.enable_retries", 1), + ("grpc.service_config", service_config_json), + ], + ) + stub = services_pb2_grpc.LearnerServiceStub(channel) + logging.info("[ACTOR] Learner service client created") + return stub, channel + + +def receive_policy( + cfg: TrainRLServerPipelineConfig, + parameters_queue: Queue, + shutdown_event: Event, # type: ignore + learner_client: services_pb2_grpc.LearnerServiceStub | None = None, + grpc_channel: grpc.Channel | None = None, +): + """Receive parameters from the learner. 
+ + Args: + cfg (TrainRLServerPipelineConfig): The configuration for the actor. + parameters_queue (Queue): The queue to receive the parameters. + shutdown_event (Event): The event to check if the process should shutdown. + """ + logging.info("[ACTOR] Start receiving parameters from the Learner") + if not use_threads(cfg): + # Create a process-specific log file + log_dir = os.path.join(cfg.output_dir, "logs") + os.makedirs(log_dir, exist_ok=True) + log_file = os.path.join(log_dir, f"actor_receive_policy_{os.getpid()}.log") + + # Initialize logging with explicit log file + init_logging(log_file=log_file, display_pid=True) + logging.info("Actor receive policy process logging initialized") + + # Setup process handlers to handle shutdown signal + # But use shutdown event from the main process + _ = ProcessSignalHandler(use_threads=False, display_pid=True) + + if grpc_channel is None or learner_client is None: + learner_client, grpc_channel = learner_service_client( + host=cfg.policy.actor_learner_config.learner_host, + port=cfg.policy.actor_learner_config.learner_port, + ) + + try: + iterator = learner_client.StreamParameters(services_pb2.Empty()) + receive_bytes_in_chunks( + iterator, + parameters_queue, + shutdown_event, + log_prefix="[ACTOR] parameters", + ) + + except grpc.RpcError as e: + logging.error(f"[ACTOR] gRPC error: {e}") + + if not use_threads(cfg): + grpc_channel.close() + logging.info("[ACTOR] Received policy loop stopped") + + +def send_transitions( + cfg: TrainRLServerPipelineConfig, + transitions_queue: Queue, + shutdown_event: any, # Event, + learner_client: services_pb2_grpc.LearnerServiceStub | None = None, + grpc_channel: grpc.Channel | None = None, +) -> services_pb2.Empty: + """ + Sends transitions to the learner. + + This function continuously retrieves messages from the queue and processes: + + - Transition Data: + - A batch of transitions (observation, action, reward, next observation) is collected. + - Transitions are moved to the CPU and serialized using PyTorch. + - The serialized data is wrapped in a `services_pb2.Transition` message and sent to the learner. + """ + + if not use_threads(cfg): + # Create a process-specific log file + log_dir = os.path.join(cfg.output_dir, "logs") + os.makedirs(log_dir, exist_ok=True) + log_file = os.path.join(log_dir, f"actor_transitions_{os.getpid()}.log") + + # Initialize logging with explicit log file + init_logging(log_file=log_file, display_pid=True) + logging.info("Actor transitions process logging initialized") + + if grpc_channel is None or learner_client is None: + learner_client, grpc_channel = learner_service_client( + host=cfg.policy.actor_learner_config.learner_host, + port=cfg.policy.actor_learner_config.learner_port, + ) + + try: + learner_client.SendTransitions( + transitions_stream( + shutdown_event, transitions_queue, cfg.policy.actor_learner_config.queue_get_timeout + ) + ) + except grpc.RpcError as e: + logging.error(f"[ACTOR] gRPC error: {e}") + + logging.info("[ACTOR] Finished streaming transitions") + + if not use_threads(cfg): + grpc_channel.close() + logging.info("[ACTOR] Transitions process stopped") + + +def send_interactions( + cfg: TrainRLServerPipelineConfig, + interactions_queue: Queue, + shutdown_event: Event, # type: ignore + learner_client: services_pb2_grpc.LearnerServiceStub | None = None, + grpc_channel: grpc.Channel | None = None, +) -> services_pb2.Empty: + """ + Sends interactions to the learner. 
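+    Each message is, for example, a pickled dictionary with keys such as "Episodic reward",
+    "Interaction step", "Episode intervention" and "Intervention rate" (see `act_with_policy`).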
+ + This function continuously retrieves messages from the queue and processes: + + - Interaction Messages: + - Contains useful statistics about episodic rewards and policy timings. + - The message is serialized using `pickle` and sent to the learner. + """ + + if not use_threads(cfg): + # Create a process-specific log file + log_dir = os.path.join(cfg.output_dir, "logs") + os.makedirs(log_dir, exist_ok=True) + log_file = os.path.join(log_dir, f"actor_interactions_{os.getpid()}.log") + + # Initialize logging with explicit log file + init_logging(log_file=log_file, display_pid=True) + logging.info("Actor interactions process logging initialized") + + # Setup process handlers to handle shutdown signal + # But use shutdown event from the main process + _ = ProcessSignalHandler(use_threads=False, display_pid=True) + + if grpc_channel is None or learner_client is None: + learner_client, grpc_channel = learner_service_client( + host=cfg.policy.actor_learner_config.learner_host, + port=cfg.policy.actor_learner_config.learner_port, + ) + + try: + learner_client.SendInteractions( + interactions_stream( + shutdown_event, interactions_queue, cfg.policy.actor_learner_config.queue_get_timeout + ) + ) + except grpc.RpcError as e: + logging.error(f"[ACTOR] gRPC error: {e}") + + logging.info("[ACTOR] Finished streaming interactions") + + if not use_threads(cfg): + grpc_channel.close() + logging.info("[ACTOR] Interactions process stopped") + + +def transitions_stream(shutdown_event: Event, transitions_queue: Queue, timeout: float) -> services_pb2.Empty: # type: ignore + while not shutdown_event.is_set(): + try: + message = transitions_queue.get(block=True, timeout=timeout) + except Empty: + logging.debug("[ACTOR] Transition queue is empty") + continue + + yield from send_bytes_in_chunks( + message, services_pb2.Transition, log_prefix="[ACTOR] Send transitions" + ) + + return services_pb2.Empty() + + +def interactions_stream( + shutdown_event: Event, + interactions_queue: Queue, + timeout: float, # type: ignore +) -> services_pb2.Empty: + while not shutdown_event.is_set(): + try: + message = interactions_queue.get(block=True, timeout=timeout) + except Empty: + logging.debug("[ACTOR] Interaction queue is empty") + continue + + yield from send_bytes_in_chunks( + message, + services_pb2.InteractionMessage, + log_prefix="[ACTOR] Send interactions", + ) + + return services_pb2.Empty() + + +################################################# +# Policy functions # +################################################# + + +def update_policy_parameters(policy: SACPolicy, parameters_queue: Queue, device): + bytes_state_dict = get_last_item_from_queue(parameters_queue, block=False) + if bytes_state_dict is not None: + logging.info("[ACTOR] Load new parameters from Learner.") + state_dict = bytes_to_state_dict(bytes_state_dict) + state_dict = move_state_dict_to_device(state_dict, device=device) + policy.load_state_dict(state_dict) + + +################################################# +# Utilities functions # +################################################# + + +def push_transitions_to_transport_queue(transitions: list, transitions_queue): + """Send transitions to learner in smaller chunks to avoid network issues. 
+ + Args: + transitions: List of transitions to send + message_queue: Queue to send messages to learner + chunk_size: Size of each chunk to send + """ + transition_to_send_to_learner = [] + for transition in transitions: + tr = move_transition_to_device(transition=transition, device="cpu") + for key, value in tr["state"].items(): + if torch.isnan(value).any(): + logging.warning(f"Found NaN values in transition {key}") + + transition_to_send_to_learner.append(tr) + + transitions_queue.put(transitions_to_bytes(transition_to_send_to_learner)) + + +def get_frequency_stats(timer: TimerManager) -> dict[str, float]: + """Get the frequency statistics of the policy. + + Args: + timer (TimerManager): The timer with collected metrics. + + Returns: + dict[str, float]: The frequency statistics of the policy. + """ + stats = {} + if timer.count > 1: + avg_fps = timer.fps_avg + p90_fps = timer.fps_percentile(90) + logging.debug(f"[ACTOR] Average policy frame rate: {avg_fps}") + logging.debug(f"[ACTOR] Policy frame rate 90th percentile: {p90_fps}") + stats = { + "Policy frequency [Hz]": avg_fps, + "Policy frequency 90th-p [Hz]": p90_fps, + } + return stats + + +def log_policy_frequency_issue(policy_fps: float, cfg: TrainRLServerPipelineConfig, interaction_step: int): + if policy_fps < cfg.env.fps: + logging.warning( + f"[ACTOR] Policy FPS {policy_fps:.1f} below required {cfg.env.fps} at step {interaction_step}" + ) + + +def use_threads(cfg: TrainRLServerPipelineConfig) -> bool: + return cfg.policy.concurrency.actor == "threads" + + +if __name__ == "__main__": + actor_cli() diff --git a/lerobot/scripts/rl/crop_dataset_roi.py b/lerobot/scripts/rl/crop_dataset_roi.py new file mode 100644 index 0000000000..5b7038de30 --- /dev/null +++ b/lerobot/scripts/rl/crop_dataset_roi.py @@ -0,0 +1,314 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import json +from copy import deepcopy +from pathlib import Path +from typing import Dict, Tuple + +import cv2 + +# import torch.nn.functional as F # noqa: N812 +import torchvision.transforms.functional as F # type: ignore # noqa: N812 +from tqdm import tqdm # type: ignore + +from lerobot.common.datasets.lerobot_dataset import LeRobotDataset + + +def select_rect_roi(img): + """ + Allows the user to draw a rectangular ROI on the image. + + The user must click and drag to draw the rectangle. + - While dragging, the rectangle is dynamically drawn. + - On mouse button release, the rectangle is fixed. + - Press 'c' to confirm the selection. + - Press 'r' to reset the selection. + - Press ESC to cancel. + + Returns: + A tuple (top, left, height, width) representing the rectangular ROI, + or None if no valid ROI is selected. 
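+
+    Example (a minimal usage sketch; the image path is hypothetical):
+        >>> img = cv2.imread("sample_frame.png")
+        >>> roi = select_rect_roi(img)
+        >>> if roi is not None:
+        ...     top, left, height, width = roi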
+ """ + # Create a working copy of the image + clone = img.copy() + working_img = clone.copy() + + roi = None # Will store the final ROI as (top, left, height, width) + drawing = False + index_x, index_y = -1, -1 # Initial click coordinates + + def mouse_callback(event, x, y, flags, param): + nonlocal index_x, index_y, drawing, roi, working_img + + if event == cv2.EVENT_LBUTTONDOWN: + # Start drawing: record starting coordinates + drawing = True + index_x, index_y = x, y + + elif event == cv2.EVENT_MOUSEMOVE: + if drawing: + # Compute the top-left and bottom-right corners regardless of drag direction + top = min(index_y, y) + left = min(index_x, x) + bottom = max(index_y, y) + right = max(index_x, x) + # Show a temporary image with the current rectangle drawn + temp = working_img.copy() + cv2.rectangle(temp, (left, top), (right, bottom), (0, 255, 0), 2) + cv2.imshow("Select ROI", temp) + + elif event == cv2.EVENT_LBUTTONUP: + # Finish drawing + drawing = False + top = min(index_y, y) + left = min(index_x, x) + bottom = max(index_y, y) + right = max(index_x, x) + height = bottom - top + width = right - left + roi = (top, left, height, width) # (top, left, height, width) + # Draw the final rectangle on the working image and display it + working_img = clone.copy() + cv2.rectangle(working_img, (left, top), (right, bottom), (0, 255, 0), 2) + cv2.imshow("Select ROI", working_img) + + # Create the window and set the callback + cv2.namedWindow("Select ROI") + cv2.setMouseCallback("Select ROI", mouse_callback) + cv2.imshow("Select ROI", working_img) + + print("Instructions for ROI selection:") + print(" - Click and drag to draw a rectangular ROI.") + print(" - Press 'c' to confirm the selection.") + print(" - Press 'r' to reset and draw again.") + print(" - Press ESC to cancel the selection.") + + # Wait until the user confirms with 'c', resets with 'r', or cancels with ESC + while True: + key = cv2.waitKey(1) & 0xFF + # Confirm ROI if one has been drawn + if key == ord("c") and roi is not None: + break + # Reset: clear the ROI and restore the original image + elif key == ord("r"): + working_img = clone.copy() + roi = None + cv2.imshow("Select ROI", working_img) + # Cancel selection for this image + elif key == 27: # ESC key + roi = None + break + + cv2.destroyWindow("Select ROI") + return roi + + +def select_square_roi_for_images(images: dict) -> dict: + """ + For each image in the provided dictionary, open a window to allow the user + to select a rectangular ROI. Returns a dictionary mapping each key to a tuple + (top, left, height, width) representing the ROI. + + Parameters: + images (dict): Dictionary where keys are identifiers and values are OpenCV images. + + Returns: + dict: Mapping of image keys to the selected rectangular ROI. + """ + selected_rois = {} + + for key, img in images.items(): + if img is None: + print(f"Image for key '{key}' is None, skipping.") + continue + + print(f"\nSelect rectangular ROI for image with key: '{key}'") + roi = select_rect_roi(img) + + if roi is None: + print(f"No valid ROI selected for '{key}'.") + else: + selected_rois[key] = roi + print(f"ROI for '{key}': {roi}") + + return selected_rois + + +def get_image_from_lerobot_dataset(dataset: LeRobotDataset): + """ + Find the first row in the dataset and extract the image in order to be used for the crop. 
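+
+    Example (the camera key name is illustrative and depends on the dataset):
+        >>> images = get_image_from_lerobot_dataset(dataset)
+        >>> side_cam = images.get("observation.images.side")  # hypothetical key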
+ """ + row = dataset[0] + image_dict = {} + for k in row: + if "image" in k: + image_dict[k] = deepcopy(row[k]) + return image_dict + + +def convert_lerobot_dataset_to_cropper_lerobot_dataset( + original_dataset: LeRobotDataset, + crop_params_dict: Dict[str, Tuple[int, int, int, int]], + new_repo_id: str, + new_dataset_root: str, + resize_size: Tuple[int, int] = (128, 128), + push_to_hub: bool = False, + task: str = "", +) -> LeRobotDataset: + """ + Converts an existing LeRobotDataset by iterating over its episodes and frames, + applying cropping and resizing to image observations, and saving a new dataset + with the transformed data. + + Args: + original_dataset (LeRobotDataset): The source dataset. + crop_params_dict (Dict[str, Tuple[int, int, int, int]]): + A dictionary mapping observation keys to crop parameters (top, left, height, width). + new_repo_id (str): Repository id for the new dataset. + new_dataset_root (str): The root directory where the new dataset will be written. + resize_size (Tuple[int, int], optional): The target size (height, width) after cropping. + Defaults to (128, 128). + + Returns: + LeRobotDataset: A new LeRobotDataset where the specified image observations have been cropped + and resized. + """ + # 1. Create a new (empty) LeRobotDataset for writing. + new_dataset = LeRobotDataset.create( + repo_id=new_repo_id, + fps=original_dataset.fps, + root=new_dataset_root, + robot_type=original_dataset.meta.robot_type, + features=original_dataset.meta.info["features"], + use_videos=len(original_dataset.meta.video_keys) > 0, + ) + + # Update the metadata for every image key that will be cropped: + # (Here we simply set the shape to be the final resize_size.) + for key in crop_params_dict: + if key in new_dataset.meta.info["features"]: + new_dataset.meta.info["features"][key]["shape"] = [3] + list(resize_size) + + # TODO: Directly modify the mp4 video + meta info features, instead of recreating a dataset + prev_episode_index = 0 + for frame_idx in tqdm(range(len(original_dataset))): + frame = original_dataset[frame_idx] + + # Create a copy of the frame to add to the new dataset + new_frame = {} + for key, value in frame.items(): + if key in ("task_index", "timestamp", "episode_index", "frame_index", "index", "task"): + continue + if key in ("next.done", "next.reward"): + # if not isinstance(value, str) and len(value.shape) == 0: + value = value.unsqueeze(0) + + if key in crop_params_dict: + top, left, height, width = crop_params_dict[key] + # Apply crop then resize. 
+ cropped = F.crop(value, top, left, height, width) + value = F.resize(cropped, resize_size) + value = value.clamp(0, 1) + + new_frame[key] = value + + new_dataset.add_frame(new_frame, task=task) + + if frame["episode_index"].item() != prev_episode_index: + # Save the episode + new_dataset.save_episode() + prev_episode_index = frame["episode_index"].item() + + # Save the last episode + new_dataset.save_episode() + + if push_to_hub: + new_dataset.push_to_hub() + + return new_dataset + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Crop rectangular ROIs from a LeRobot dataset.") + parser.add_argument( + "--repo-id", + type=str, + default="lerobot", + help="The repository id of the LeRobot dataset to process.", + ) + parser.add_argument( + "--root", + type=str, + default=None, + help="The root directory of the LeRobot dataset.", + ) + parser.add_argument( + "--crop-params-path", + type=str, + default=None, + help="The path to the JSON file containing the ROIs.", + ) + parser.add_argument( + "--push-to-hub", + type=bool, + default=False, + help="Whether to push the new dataset to the hub.", + ) + parser.add_argument( + "--task", + type=str, + default="", + help="The natural language task to describe the dataset.", + ) + args = parser.parse_args() + + dataset = LeRobotDataset(repo_id=args.repo_id, root=args.root) + + images = get_image_from_lerobot_dataset(dataset) + images = {k: v.cpu().permute(1, 2, 0).numpy() for k, v in images.items()} + images = {k: (v * 255).astype("uint8") for k, v in images.items()} + + if args.crop_params_path is None: + rois = select_square_roi_for_images(images) + else: + with open(args.crop_params_path) as f: + rois = json.load(f) + + # Print the selected rectangular ROIs + print("\nSelected Rectangular Regions of Interest (top, left, height, width):") + for key, roi in rois.items(): + print(f"{key}: {roi}") + + new_repo_id = args.repo_id + "_cropped_resized" + new_dataset_root = Path(str(dataset.root) + "_cropped_resized") + + cropped_resized_dataset = convert_lerobot_dataset_to_cropper_lerobot_dataset( + original_dataset=dataset, + crop_params_dict=rois, + new_repo_id=new_repo_id, + new_dataset_root=new_dataset_root, + resize_size=(128, 128), + push_to_hub=args.push_to_hub, + task=args.task, + ) + + meta_dir = new_dataset_root / "meta" + meta_dir.mkdir(exist_ok=True) + + with open(meta_dir / "crop_params.json", "w") as f: + json.dump(rois, f, indent=4) diff --git a/lerobot/scripts/rl/gym_manipulator.py b/lerobot/scripts/rl/gym_manipulator.py new file mode 100644 index 0000000000..98445e6668 --- /dev/null +++ b/lerobot/scripts/rl/gym_manipulator.py @@ -0,0 +1,2171 @@ +# !/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +""" +Robot Environment for LeRobot Manipulation Tasks + +This module provides a comprehensive gym-compatible environment for robot manipulation +with support for: +- Multiple robot types (SO100, SO101, Koch and Moss) +- Human intervention via leader-follower control or gamepad + +- End-effector and joint space control +- Image processing (cropping and resizing) + +The environment is built using a composable wrapper pattern where each wrapper +adds specific functionality to the base RobotEnv. + +Example: + env = make_robot_env(cfg) + obs, info = env.reset() + action = policy.select_action(obs) + obs, reward, terminated, truncated, info = env.step(action) +""" + +import logging +import time +from collections import deque +from threading import Lock +from typing import Annotated, Any, Sequence + +import gymnasium as gym +import numpy as np +import torch +import torchvision.transforms.functional as F # noqa: N812 + +from lerobot.common.cameras import opencv # noqa: F401 +from lerobot.common.envs.configs import EnvConfig +from lerobot.common.envs.utils import preprocess_observation +from lerobot.common.model.kinematics import RobotKinematics +from lerobot.common.robots import ( # noqa: F401 + RobotConfig, + make_robot_from_config, + so100_follower, +) +from lerobot.common.teleoperators import ( + gamepad, # noqa: F401 + make_teleoperator_from_config, + so101_leader, # noqa: F401 +) +from lerobot.common.teleoperators.gamepad.teleop_gamepad import GamepadTeleop +from lerobot.common.utils.robot_utils import busy_wait +from lerobot.common.utils.utils import log_say +from lerobot.configs import parser + +logging.basicConfig(level=logging.INFO) + + +def reset_follower_position(robot_arm, target_position): + current_position_dict = robot_arm.bus.sync_read("Present_Position") + current_position = np.array( + [current_position_dict[name] for name in current_position_dict], dtype=np.float32 + ) + trajectory = torch.from_numpy( + np.linspace(current_position, target_position, 50) + ) # NOTE: 30 is just an arbitrary number + for pose in trajectory: + action_dict = dict(zip(current_position_dict, pose, strict=False)) + robot_arm.bus.sync_write("Goal_Position", action_dict) + busy_wait(0.015) + + +class TorchBox(gym.spaces.Box): + """ + A version of gym.spaces.Box that handles PyTorch tensors. + + This class extends gym.spaces.Box to work with PyTorch tensors, + providing compatibility between NumPy arrays and PyTorch tensors. + """ + + def __init__( + self, + low: float | Sequence[float] | np.ndarray, + high: float | Sequence[float] | np.ndarray, + shape: Sequence[int] | None = None, + np_dtype: np.dtype | type = np.float32, + torch_dtype: torch.dtype = torch.float32, + device: str = "cpu", + seed: int | np.random.Generator | None = None, + ) -> None: + """ + Initialize the PyTorch-compatible Box space. + + Args: + low: Lower bounds of the space. + high: Upper bounds of the space. + shape: Shape of the space. If None, inferred from low and high. + np_dtype: NumPy data type for internal storage. + torch_dtype: PyTorch data type for tensor conversion. + device: PyTorch device for returned tensors. + seed: Random seed for sampling. + """ + super().__init__(low, high, shape=shape, dtype=np_dtype, seed=seed) + self.torch_dtype = torch_dtype + self.device = device + + def sample(self) -> torch.Tensor: + """ + Sample a random point from the space. + + Returns: + A PyTorch tensor within the space bounds. 
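+
+        Example (illustrative bounds):
+            >>> space = TorchBox(low=-1.0, high=1.0, shape=(4,))
+            >>> space.sample().dtype
+            torch.float32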
+ """ + arr = super().sample() + return torch.as_tensor(arr, dtype=self.torch_dtype, device=self.device) + + def contains(self, x: torch.Tensor) -> bool: + """ + Check if a tensor is within the space bounds. + + Args: + x: The PyTorch tensor to check. + + Returns: + Boolean indicating whether the tensor is within bounds. + """ + # Move to CPU/numpy and cast to the internal dtype + arr = x.detach().cpu().numpy().astype(self.dtype, copy=False) + return super().contains(arr) + + def seed(self, seed: int | np.random.Generator | None = None): + """ + Set the random seed for sampling. + + Args: + seed: The random seed to use. + + Returns: + List containing the seed. + """ + super().seed(seed) + return [seed] + + def __repr__(self) -> str: + """ + Return a string representation of the space. + + Returns: + Formatted string with space details. + """ + return ( + f"TorchBox({self.low_repr}, {self.high_repr}, {self.shape}, " + f"np={self.dtype.name}, torch={self.torch_dtype}, device={self.device})" + ) + + +class TorchActionWrapper(gym.Wrapper): + """ + Wrapper that changes the action space to use PyTorch tensors. + + This wrapper modifies the action space to return PyTorch tensors when sampled + and handles converting PyTorch actions to NumPy when stepping the environment. + """ + + def __init__(self, env: gym.Env, device: str): + """ + Initialize the PyTorch action space wrapper. + + Args: + env: The environment to wrap. + device: The PyTorch device to use for tensor operations. + """ + super().__init__(env) + self.action_space = TorchBox( + low=env.action_space.low, + high=env.action_space.high, + shape=env.action_space.shape, + torch_dtype=torch.float32, + device=torch.device("cpu"), + ) + + def step(self, action: torch.Tensor): + """ + Step the environment with a PyTorch tensor action. + + This method handles conversion from PyTorch tensors to NumPy arrays + for compatibility with the underlying environment. + + Args: + action: PyTorch tensor action to take. + + Returns: + Tuple of (observation, reward, terminated, truncated, info). + """ + if action.dim() == 2: + action = action.squeeze(0) + action = action.detach().cpu().numpy() + return self.env.step(action) + + +class RobotEnv(gym.Env): + """ + Gym-compatible environment for evaluating robotic control policies with integrated human intervention. + + This environment wraps a robot interface to provide a consistent API for policy evaluation. It supports both relative (delta) + and absolute joint position commands and automatically configures its observation and action spaces based on the robot's + sensors and configuration. + """ + + def __init__( + self, + robot, + use_gripper: bool = False, + display_cameras: bool = False, + ): + """ + Initialize the RobotEnv environment. + + The environment is set up with a robot interface, which is used to capture observations and send joint commands. The setup + supports both relative (delta) adjustments and absolute joint positions for controlling the robot. + + Args: + robot: The robot interface object used to connect and interact with the physical robot. + display_cameras: If True, the robot's camera feeds will be displayed during execution. + """ + super().__init__() + + self.robot = robot + self.display_cameras = display_cameras + + # Connect to the robot if not already connected. + if not self.robot.is_connected: + self.robot.connect() + + # Episode tracking. 
+ self.current_step = 0 + self.episode_data = None + + self._joint_names = [f"{key}.pos" for key in self.robot.bus.motors] + self._image_keys = self.robot.cameras.keys() + + # Read initial joint positions using the bus + self.current_joint_positions = self._get_observation()["agent_pos"] + + self.use_gripper = use_gripper + + self._setup_spaces() + + def _get_observation(self) -> np.ndarray: + """Helper to convert a dictionary from bus.sync_read to an ordered numpy array.""" + obs_dict = self.robot.get_observation() + joint_positions = np.array([obs_dict[name] for name in self._joint_names], dtype=np.float32) + + images = {key: obs_dict[key] for key in self._image_keys} + return {"agent_pos": joint_positions, "pixels": images} + + def _setup_spaces(self): + """ + Dynamically configure the observation and action spaces based on the robot's capabilities. + + Observation Space: + - For keys with "image": A Box space with pixel values ranging from 0 to 255. + - For non-image keys: A nested Dict space is created under 'observation.state' with a suitable range. + + Action Space: + - The action space is defined as a Box space representing joint position commands. It is defined as relative (delta) + or absolute, based on the configuration. + """ + example_obs = self._get_observation() + + observation_spaces = {} + + # Define observation spaces for images and other states. + if "pixels" in example_obs: + prefix = "observation.images" if len(example_obs["pixels"]) > 1 else "observation.image" + observation_spaces = { + f"{prefix}.{key}": gym.spaces.Box( + low=0, high=255, shape=example_obs["pixels"][key].shape, dtype=np.uint8 + ) + for key in example_obs["pixels"] + } + + observation_spaces["observation.state"] = gym.spaces.Box( + low=0, + high=10, + shape=example_obs["agent_pos"].shape, + dtype=np.float32, + ) + + self.observation_space = gym.spaces.Dict(observation_spaces) + + # Define the action space for joint positions along with setting an intervention flag. + action_dim = 3 + bounds = {} + bounds["min"] = -np.ones(action_dim) + bounds["max"] = np.ones(action_dim) + + if self.use_gripper: + action_dim += 1 + bounds["min"] = np.concatenate([bounds["min"], [0]]) + bounds["max"] = np.concatenate([bounds["max"], [2]]) + + self.action_space = gym.spaces.Box( + low=bounds["min"], + high=bounds["max"], + shape=(action_dim,), + dtype=np.float32, + ) + + def reset(self, seed=None, options=None) -> tuple[dict[str, np.ndarray], dict[str, Any]]: + """ + Reset the environment to its initial state. + This method resets the step counter and clears any episodic data. + + Args: + seed: A seed for random number generation to ensure reproducibility. + options: Additional options to influence the reset behavior. + + Returns: + A tuple containing: + - observation (dict): The initial sensor observation. + - info (dict): A dictionary with supplementary information, including the key "is_intervention". + """ + super().reset(seed=seed, options=options) + + self.robot.reset() + + # Capture the initial observation. + observation = self._get_observation() + + # Reset episode tracking variables. + self.current_step = 0 + self.episode_data = None + + return observation, {"is_intervention": False} + + def step(self, action) -> tuple[dict[str, np.ndarray], float, bool, bool, dict[str, Any]]: + """ + Execute a single step within the environment using the specified action. 
+ + The provided action is processed and sent to the robot as joint position commands + that may be either absolute values or deltas based on the environment configuration. + + Args: + action: The commanded joint positions as a numpy array or torch tensor. + + Returns: + A tuple containing: + - observation (dict): The new sensor observation after taking the step. + - reward (float): The step reward (default is 0.0 within this wrapper). + - terminated (bool): True if the episode has reached a terminal state. + - truncated (bool): True if the episode was truncated (e.g., time constraints). + - info (dict): Additional debugging information including intervention status. + """ + self.current_joint_positions = self._get_observation()["agent_pos"] + + action_dict = {"delta_x": action[0], "delta_y": action[1], "delta_z": action[2]} + + # 1.0 action corresponds to no-op action + action_dict["gripper"] = action[3] if self.use_gripper else 1.0 + + self.robot.send_action(action_dict) + + if self.display_cameras: + self.render() + + self.current_step += 1 + + reward = 0.0 + terminated = False + truncated = False + + return ( + self._get_observation(), + reward, + terminated, + truncated, + {"is_intervention": False}, + ) + + def render(self): + """ + Render the current state of the environment by displaying the robot's camera feeds. + """ + import cv2 + + observation = self._get_observation() + image_keys = [key for key in observation if "image" in key] + + for key in image_keys: + cv2.imshow(key, cv2.cvtColor(observation[key].numpy(), cv2.COLOR_RGB2BGR)) + cv2.waitKey(1) + + def close(self): + """ + Close the environment and clean up resources by disconnecting the robot. + + If the robot is currently connected, this method properly terminates the connection to ensure that all + associated resources are released. + """ + if self.robot.is_connected: + self.robot.disconnect() + + +class AddJointVelocityToObservation(gym.ObservationWrapper): + """ + Wrapper that adds joint velocity information to the observation. + + This wrapper computes joint velocities by tracking changes in joint positions over time, + and extends the observation space to include these velocities. + """ + + def __init__(self, env, joint_velocity_limits=100.0, fps=30, num_dof=6): + """ + Initialize the joint velocity wrapper. + + Args: + env: The environment to wrap. + joint_velocity_limits: Maximum expected joint velocity for space bounds. + fps: Frames per second used to calculate velocity (position delta / time). + num_dof: Number of degrees of freedom (joints) in the robot. + """ + super().__init__(env) + + # Extend observation space to include joint velocities + old_low = self.observation_space["observation.state"].low + old_high = self.observation_space["observation.state"].high + old_shape = self.observation_space["observation.state"].shape + + self.last_joint_positions = np.zeros(num_dof) + + new_low = np.concatenate([old_low, np.ones(num_dof) * -joint_velocity_limits]) + new_high = np.concatenate([old_high, np.ones(num_dof) * joint_velocity_limits]) + + new_shape = (old_shape[0] + num_dof,) + + self.observation_space["observation.state"] = gym.spaces.Box( + low=new_low, + high=new_high, + shape=new_shape, + dtype=np.float32, + ) + + self.dt = 1.0 / fps + + def observation(self, observation): + """ + Add joint velocity information to the observation. + + Args: + observation: The original observation from the environment. + + Returns: + The modified observation with joint velocities. 
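+
+        Example (illustrative numbers): with fps=30 (dt ~= 0.033 s), a joint that moves
+        from 10.0 to 10.5 between two consecutive frames is assigned a velocity of
+        0.5 / (1 / 30) = 15.0 units per second.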
+ """ + joint_velocities = (observation["agent_pos"] - self.last_joint_positions) / self.dt + self.last_joint_positions = observation["agent_pos"] + observation["agent_pos"] = np.concatenate([observation["agent_pos"], joint_velocities], axis=-1) + return observation + + +class AddCurrentToObservation(gym.ObservationWrapper): + """ + Wrapper that adds motor current information to the observation. + + This wrapper extends the observation space to include the current values + from each motor, providing information about the forces being applied. + """ + + def __init__(self, env, max_current=500, num_dof=6): + """ + Initialize the current observation wrapper. + + Args: + env: The environment to wrap. + max_current: Maximum expected current for space bounds. + num_dof: Number of degrees of freedom (joints) in the robot. + """ + super().__init__(env) + + # Extend observation space to include joint velocities + old_low = self.observation_space["observation.state"].low + old_high = self.observation_space["observation.state"].high + old_shape = self.observation_space["observation.state"].shape + + new_low = np.concatenate([old_low, np.zeros(num_dof)]) + new_high = np.concatenate([old_high, np.ones(num_dof) * max_current]) + + new_shape = (old_shape[0] + num_dof,) + + self.observation_space["observation.state"] = gym.spaces.Box( + low=new_low, + high=new_high, + shape=new_shape, + dtype=np.float32, + ) + + def observation(self, observation): + """ + Add current information to the observation. + + Args: + observation: The original observation from the environment. + + Returns: + The modified observation with current values. + """ + present_current_observation = self.unwrapped._get_observation()["agent_pos"] + observation["agent_pos"] = np.concatenate( + [observation["agent_pos"], present_current_observation], axis=-1 + ) + return observation + + +class RewardWrapper(gym.Wrapper): + def __init__(self, env, reward_classifier, device="cuda"): + """ + Wrapper to add reward prediction to the environment using a trained classifier. + + Args: + env: The environment to wrap. + reward_classifier: The reward classifier model. + device: The device to run the model on. + """ + self.env = env + + self.device = device + + self.reward_classifier = torch.compile(reward_classifier) + self.reward_classifier.to(self.device) + + def step(self, action): + """ + Execute a step and compute the reward using the classifier. + + Args: + action: The action to take in the environment. + + Returns: + Tuple of (observation, reward, terminated, truncated, info). + """ + observation, _, terminated, truncated, info = self.env.step(action) + + images = {} + for key in observation: + if "image" in key: + images[key] = observation[key].to(self.device, non_blocking=(self.device == "cuda")) + if images[key].dim() == 3: + images[key] = images[key].unsqueeze(0) + + start_time = time.perf_counter() + with torch.inference_mode(): + success = ( + self.reward_classifier.predict_reward(images, threshold=0.7) + if self.reward_classifier is not None + else 0.0 + ) + info["Reward classifier frequency"] = 1 / (time.perf_counter() - start_time) + + reward = 0.0 + if success == 1.0: + terminated = True + reward = 1.0 + + return observation, reward, terminated, truncated, info + + def reset(self, seed=None, options=None): + """ + Reset the environment. + + Args: + seed: Random seed for reproducibility. + options: Additional reset options. + + Returns: + The initial observation and info from the wrapped environment. 
+ """ + return self.env.reset(seed=seed, options=options) + + +class TimeLimitWrapper(gym.Wrapper): + """ + Wrapper that adds a time limit to episodes and tracks execution time. + + This wrapper terminates episodes after a specified time has elapsed, providing + better control over episode length. + """ + + def __init__(self, env, control_time_s, fps): + """ + Initialize the time limit wrapper. + + Args: + env: The environment to wrap. + control_time_s: Maximum episode duration in seconds. + fps: Frames per second for calculating the maximum number of steps. + """ + self.env = env + self.control_time_s = control_time_s + self.fps = fps + + self.last_timestamp = 0.0 + self.episode_time_in_s = 0.0 + + self.max_episode_steps = int(self.control_time_s * self.fps) + + self.current_step = 0 + + def step(self, action): + """ + Step the environment and track time elapsed. + + Args: + action: The action to take in the environment. + + Returns: + Tuple of (observation, reward, terminated, truncated, info). + """ + obs, reward, terminated, truncated, info = self.env.step(action) + time_since_last_step = time.perf_counter() - self.last_timestamp + self.episode_time_in_s += time_since_last_step + self.last_timestamp = time.perf_counter() + self.current_step += 1 + # check if last timestep took more time than the expected fps + if 1.0 / time_since_last_step < self.fps: + logging.debug(f"Current timestep exceeded expected fps {self.fps}") + + if self.current_step >= self.max_episode_steps: + terminated = True + return obs, reward, terminated, truncated, info + + def reset(self, seed=None, options=None): + """ + Reset the environment and time tracking. + + Args: + seed: Random seed for reproducibility. + options: Additional reset options. + + Returns: + The initial observation and info from the wrapped environment. + """ + self.episode_time_in_s = 0.0 + self.last_timestamp = time.perf_counter() + self.current_step = 0 + return self.env.reset(seed=seed, options=options) + + +class ImageCropResizeWrapper(gym.Wrapper): + """ + Wrapper that crops and resizes image observations. + + This wrapper processes image observations to focus on relevant regions by + cropping and then resizing to a standard size. + """ + + def __init__( + self, + env, + crop_params_dict: dict[str, Annotated[tuple[int], 4]], + resize_size=None, + ): + """ + Initialize the image crop and resize wrapper. + + Args: + env: The environment to wrap. + crop_params_dict: Dictionary mapping image observation keys to crop parameters + (top, left, height, width). + resize_size: Target size for resized images (height, width). Defaults to (128, 128). + """ + super().__init__(env) + self.env = env + self.crop_params_dict = crop_params_dict + print(f"obs_keys , {self.env.observation_space}") + print(f"crop params dict {crop_params_dict.keys()}") + for key_crop in crop_params_dict: + if key_crop not in self.env.observation_space.keys(): # noqa: SIM118 + raise ValueError(f"Key {key_crop} not in observation space") + for key in crop_params_dict: + new_shape = (3, resize_size[0], resize_size[1]) + self.observation_space[key] = gym.spaces.Box(low=0, high=255, shape=new_shape) + + self.resize_size = resize_size + if self.resize_size is None: + self.resize_size = (128, 128) + + def step(self, action): + """ + Step the environment and process image observations. + + Args: + action: The action to take in the environment. + + Returns: + Tuple of (observation, reward, terminated, truncated, info) with processed images. 
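+
+        Example (illustrative numbers): with crop parameters (top=10, left=20, height=200,
+        width=200) and resize_size=(128, 128), a 3x480x640 image is cropped to the 200x200
+        window whose top-left corner is at row 10, column 20, and then resized to 128x128.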
+ """ + obs, reward, terminated, truncated, info = self.env.step(action) + for k in self.crop_params_dict: + device = obs[k].device + if obs[k].dim() >= 3: + # Reshape to combine height and width dimensions for easier calculation + batch_size = obs[k].size(0) + channels = obs[k].size(1) + flattened_spatial_dims = obs[k].view(batch_size, channels, -1) + + # Calculate standard deviation across spatial dimensions (H, W) + # If any channel has std=0, all pixels in that channel have the same value + # This is helpful if one camera mistakenly covered or the image is black + std_per_channel = torch.std(flattened_spatial_dims, dim=2) + if (std_per_channel <= 0.02).any(): + logging.warning( + f"Potential hardware issue detected: All pixels have the same value in observation {k}" + ) + + if device == torch.device("mps:0"): + obs[k] = obs[k].cpu() + + obs[k] = F.crop(obs[k], *self.crop_params_dict[k]) + obs[k] = F.resize(obs[k], self.resize_size) + # TODO (michel-aractingi): Bug in resize, it returns values outside [0, 1] + obs[k] = obs[k].clamp(0.0, 1.0) + obs[k] = obs[k].to(device) + + return obs, reward, terminated, truncated, info + + def reset(self, seed=None, options=None): + """ + Reset the environment and process image observations. + + Args: + seed: Random seed for reproducibility. + options: Additional reset options. + + Returns: + Tuple of (observation, info) with processed images. + """ + obs, info = self.env.reset(seed=seed, options=options) + for k in self.crop_params_dict: + device = obs[k].device + if device == torch.device("mps:0"): + obs[k] = obs[k].cpu() + obs[k] = F.crop(obs[k], *self.crop_params_dict[k]) + obs[k] = F.resize(obs[k], self.resize_size) + obs[k] = obs[k].clamp(0.0, 1.0) + obs[k] = obs[k].to(device) + return obs, info + + +class ConvertToLeRobotObservation(gym.ObservationWrapper): + """ + Wrapper that converts standard observations to LeRobot format. + + This wrapper processes observations to match the expected format for LeRobot, + including normalizing image values and moving tensors to the specified device. + """ + + def __init__(self, env, device: str = "cpu"): + """ + Initialize the LeRobot observation converter. + + Args: + env: The environment to wrap. + device: Target device for the observation tensors. + """ + super().__init__(env) + + self.device = torch.device(device) + + def observation(self, observation): + """ + Convert observations to LeRobot format. + + Args: + observation: The original observation from the environment. + + Returns: + The processed observation with normalized images and proper tensor formats. + """ + observation = preprocess_observation(observation) + observation = { + key: observation[key].to(self.device, non_blocking=self.device.type == "cuda") + for key in observation + } + return observation + + +class ResetWrapper(gym.Wrapper): + """ + Wrapper that handles environment reset procedures. + + This wrapper provides additional functionality during environment reset, + including the option to reset to a fixed pose or allow manual reset. + """ + + def __init__( + self, + env: RobotEnv, + reset_pose: np.ndarray | None = None, + reset_time_s: float = 5, + ): + """ + Initialize the reset wrapper. + + Args: + env: The environment to wrap. + reset_pose: Fixed joint positions to reset to. If None, manual reset is used. + reset_time_s: Time in seconds to wait after reset or allowed for manual reset. 
+ """ + super().__init__(env) + self.reset_time_s = reset_time_s + self.reset_pose = reset_pose + self.robot = self.unwrapped.robot + + def reset(self, *, seed=None, options=None): + """ + Reset the environment with either fixed or manual reset procedure. + + If reset_pose is provided, the robot will move to that position. + Otherwise, manual teleoperation control is allowed for reset_time_s seconds. + + Args: + seed: Random seed for reproducibility. + options: Additional reset options. + + Returns: + The initial observation and info from the wrapped environment. + """ + start_time = time.perf_counter() + if self.reset_pose is not None: + log_say("Reset the environment.", play_sounds=True) + reset_follower_position(self.unwrapped.robot, self.reset_pose) + log_say("Reset the environment done.", play_sounds=True) + + if hasattr(self.env, "robot_leader"): + self.env.robot_leader.bus.sync_write("Torque_Enable", 1) + log_say("Reset the leader robot.", play_sounds=True) + reset_follower_position(self.env.robot_leader, self.reset_pose) + log_say("Reset the leader robot done.", play_sounds=True) + else: + log_say( + f"Manually reset the environment for {self.reset_time_s} seconds.", + play_sounds=True, + ) + start_time = time.perf_counter() + while time.perf_counter() - start_time < self.reset_time_s: + action = self.env.robot_leader.get_action() + self.unwrapped.robot.send_action(action) + + log_say("Manual reset of the environment done.", play_sounds=True) + + busy_wait(self.reset_time_s - (time.perf_counter() - start_time)) + + return super().reset(seed=seed, options=options) + + +class BatchCompatibleWrapper(gym.ObservationWrapper): + """ + Wrapper that ensures observations are compatible with batch processing. + + This wrapper adds a batch dimension to observations that don't already have one, + making them compatible with models that expect batched inputs. + """ + + def __init__(self, env): + """ + Initialize the batch compatibility wrapper. + + Args: + env: The environment to wrap. + """ + super().__init__(env) + + def observation(self, observation: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]: + """ + Add batch dimensions to observations if needed. + + Args: + observation: Dictionary of observation tensors. + + Returns: + Dictionary of observation tensors with batch dimensions. + """ + for key in observation: + if "image" in key and observation[key].dim() == 3: + observation[key] = observation[key].unsqueeze(0) + if "state" in key and observation[key].dim() == 1: + observation[key] = observation[key].unsqueeze(0) + if "velocity" in key and observation[key].dim() == 1: + observation[key] = observation[key].unsqueeze(0) + return observation + + +class GripperPenaltyWrapper(gym.RewardWrapper): + """ + Wrapper that adds penalties for inefficient gripper commands. + + This wrapper modifies rewards to discourage excessive gripper movement + or commands that attempt to move the gripper beyond its physical limits. + """ + + def __init__(self, env, penalty: float = -0.1): + """ + Initialize the gripper penalty wrapper. + + Args: + env: The environment to wrap. + penalty: Negative reward value to apply for inefficient gripper actions. + """ + super().__init__(env) + self.penalty = penalty + self.last_gripper_state = None + + def reward(self, reward, action): + """ + Apply penalties to reward based on gripper actions. + + Args: + reward: The original reward from the environment. + action: The action that was taken. + + Returns: + Modified reward with penalty applied if necessary. 
+ """ + gripper_state_normalized = self.last_gripper_state / self.unwrapped.robot.config.max_gripper_pos + + action_normalized = action - 1.0 # action / MAX_GRIPPER_COMMAND + + gripper_penalty_bool = (gripper_state_normalized < 0.5 and action_normalized > 0.5) or ( + gripper_state_normalized > 0.75 and action_normalized < -0.5 + ) + + return reward + self.penalty * int(gripper_penalty_bool) + + def step(self, action): + """ + Step the environment and apply gripper penalties. + + Args: + action: The action to take in the environment. + + Returns: + Tuple of (observation, reward, terminated, truncated, info) with penalty applied. + """ + self.last_gripper_state = self.unwrapped.robot.bus.sync_read("Present_Position")["gripper"] + + gripper_action = action[-1] + obs, reward, terminated, truncated, info = self.env.step(action) + gripper_penalty = self.reward(reward, gripper_action) + + info["discrete_penalty"] = gripper_penalty + + return obs, reward, terminated, truncated, info + + def reset(self, **kwargs): + """ + Reset the environment and penalty tracking. + + Args: + **kwargs: Keyword arguments passed to the wrapped environment's reset. + + Returns: + The initial observation and info with gripper penalty initialized. + """ + self.last_gripper_state = None + obs, info = super().reset(**kwargs) + info["gripper_penalty"] = 0.0 + return obs, info + + +class GripperActionWrapper(gym.ActionWrapper): + """ + Wrapper that processes gripper control commands. + + This wrapper quantizes and processes gripper commands, adding a sleep time between + consecutive gripper actions to prevent rapid toggling. + """ + + def __init__(self, env, quantization_threshold: float = 0.2, gripper_sleep: float = 0.0): + """ + Initialize the gripper action wrapper. + + Args: + env: The environment to wrap. + quantization_threshold: Threshold below which gripper commands are quantized to zero. + gripper_sleep: Minimum time in seconds between consecutive gripper commands. + """ + super().__init__(env) + self.quantization_threshold = quantization_threshold + self.gripper_sleep = gripper_sleep + self.last_gripper_action_time = 0.0 + self.last_gripper_action = None + + def action(self, action): + """ + Process gripper commands in the action. + + Args: + action: The original action from the agent. + + Returns: + Modified action with processed gripper command. + """ + if self.gripper_sleep > 0.0: + if ( + self.last_gripper_action is not None + and time.perf_counter() - self.last_gripper_action_time < self.gripper_sleep + ): + action[-1] = self.last_gripper_action + else: + self.last_gripper_action_time = time.perf_counter() + self.last_gripper_action = action[-1] + + gripper_command = action[-1] + # Gripper actions are between 0, 2 + # we want to quantize them to -1, 0 or 1 + gripper_command = gripper_command - 1.0 + + if self.quantization_threshold is not None: + # Quantize gripper command to -1, 0 or 1 + gripper_command = ( + np.sign(gripper_command) if abs(gripper_command) > self.quantization_threshold else 0.0 + ) + gripper_command = gripper_command * self.unwrapped.robot.config.max_gripper_pos + + gripper_state = self.unwrapped.robot.bus.sync_read("Present_Position")["gripper"] + + gripper_action_value = np.clip( + gripper_state + gripper_command, 0, self.unwrapped.robot.config.max_gripper_pos + ) + action[-1] = gripper_action_value.item() + return action + + def reset(self, **kwargs): + """ + Reset the gripper action tracking. + + Args: + **kwargs: Keyword arguments passed to the wrapped environment's reset. 
+ + Returns: + The initial observation and info. + """ + obs, info = super().reset(**kwargs) + self.last_gripper_action_time = 0.0 + self.last_gripper_action = None + return obs, info + + +class EEObservationWrapper(gym.ObservationWrapper): + """ + Wrapper that adds end-effector pose information to observations. + + This wrapper computes the end-effector pose using forward kinematics + and adds it to the observation space. + """ + + def __init__(self, env, ee_pose_limits): + """ + Initialize the end-effector observation wrapper. + + Args: + env: The environment to wrap. + ee_pose_limits: Dictionary with 'min' and 'max' keys containing limits for EE pose. + """ + super().__init__(env) + + # Extend observation space to include end effector pose + prev_space = self.observation_space["observation.state"] + + self.observation_space["observation.state"] = gym.spaces.Box( + low=np.concatenate([prev_space.low, ee_pose_limits["min"]]), + high=np.concatenate([prev_space.high, ee_pose_limits["max"]]), + shape=(prev_space.shape[0] + 3,), + dtype=np.float32, + ) + + # Initialize kinematics instance for the appropriate robot type + robot_type = getattr(env.unwrapped.robot.config, "robot_type", "so101") + if "so100" in robot_type or "so101" in robot_type: + # Note to be compatible with the rest of the codebase, + # we are using the new calibration method for so101 and so100 + robot_type = "so_new_calibration" + self.kinematics = RobotKinematics(robot_type) + + def observation(self, observation): + """ + Add end-effector pose to the observation. + + Args: + observation: Original observation from the environment. + + Returns: + Enhanced observation with end-effector pose information. + """ + current_joint_pos = self.unwrapped._get_observation()["agent_pos"] + + current_ee_pos = self.kinematics.forward_kinematics(current_joint_pos, frame="gripper_tip")[:3, 3] + observation["agent_pos"] = np.concatenate([observation["agent_pos"], current_ee_pos], -1) + return observation + + +########################################################### +# Wrappers related to human intervention and input devices +########################################################### + + +class BaseLeaderControlWrapper(gym.Wrapper): + """ + Base class for leader-follower robot control wrappers. + + This wrapper enables human intervention through a leader-follower robot setup, + where the human can control a leader robot to guide the follower robot's movements. + """ + + def __init__( + self, + env, + teleop_device, + end_effector_step_sizes, + use_geared_leader_arm: bool = False, + use_gripper=False, + ): + """ + Initialize the base leader control wrapper. + + Args: + env: The environment to wrap. + teleop_device: The teleoperation device. + use_geared_leader_arm: Whether to use a geared leader arm setup. + use_gripper: Whether to include gripper control. 
+ """ + super().__init__(env) + self.robot_leader = teleop_device + self.robot_follower = env.unwrapped.robot + self.use_geared_leader_arm = use_geared_leader_arm + self.use_gripper: bool = use_gripper + self.end_effector_step_sizes = np.array(list(end_effector_step_sizes.values())) + + # Set up keyboard event tracking + self._init_keyboard_events() + self.event_lock = Lock() # Thread-safe access to events + + # Initialize robot control + robot_type = getattr(env.unwrapped.robot.config, "robot_type", "so101") + if "so100" in robot_type or "so101" in robot_type: + # Note to be compatible with the rest of the codebase, + # we are using the new calibration method for so101 and so100 + robot_type = "so_new_calibration" + self.kinematics = RobotKinematics(robot_type) + self.leader_torque_enabled = True + self.prev_leader_gripper = None + + # Configure leader arm + # NOTE: Lower the gains of leader arm for automatic take-over + # With lower gains we can manually move the leader arm without risk of injury to ourselves or the robot + # With higher gains, it would be dangerous and difficult to modify the leader's pose while torque is enabled + # Default value for P_coeff is 32 + self.robot_leader.bus.sync_write("Torque_Enable", 1) + for motor in self.robot_leader.bus.motors: + self.robot_leader.bus.write("P_Coefficient", motor, 16) + self.robot_leader.bus.write("I_Coefficient", motor, 0) + self.robot_leader.bus.write("D_Coefficient", motor, 16) + + self.leader_tracking_error_queue = deque(maxlen=4) + self._init_keyboard_listener() + + def _init_keyboard_events(self): + """ + Initialize the keyboard events dictionary. + + This method sets up tracking for keyboard events used for intervention control. + It should be overridden in subclasses to add additional events. + """ + self.keyboard_events = { + "episode_success": False, + "episode_end": False, + "rerecord_episode": False, + } + + def _handle_key_press(self, key, keyboard): + """ + Handle key press events. + + Args: + key: The key that was pressed. + keyboard: The keyboard module with key definitions. + + This method should be overridden in subclasses for additional key handling. + """ + try: + if key == keyboard.Key.esc: + self.keyboard_events["episode_end"] = True + return + if key == keyboard.Key.left: + self.keyboard_events["rerecord_episode"] = True + return + if hasattr(key, "char") and key.char == "s": + logging.info("Key 's' pressed. Episode success triggered.") + self.keyboard_events["episode_success"] = True + return + except Exception as e: + logging.error(f"Error handling key press: {e}") + + def _init_keyboard_listener(self): + """ + Initialize the keyboard listener for intervention control. + + This method sets up keyboard event handling if not in headless mode. + """ + from pynput import keyboard + + def on_press(key): + with self.event_lock: + self._handle_key_press(key, keyboard) + + self.listener = keyboard.Listener(on_press=on_press) + self.listener.start() + + def _check_intervention(self): + """ + Check if human intervention is needed. + + Returns: + Boolean indicating whether intervention is needed. + + This method should be overridden in subclasses with specific intervention logic. + """ + return False + + def _handle_intervention(self, action): + """ + Process actions during intervention mode. + + Args: + action: The original action from the agent. + + Returns: + Tuple of (modified_action, intervention_action). 
+ """ + if self.leader_torque_enabled: + self.robot_leader.bus.sync_write("Torque_Enable", 0) + self.leader_torque_enabled = False + + leader_pos_dict = self.robot_leader.bus.sync_read("Present_Position") + follower_pos_dict = self.robot_follower.bus.sync_read("Present_Position") + + leader_pos = np.array([leader_pos_dict[name] for name in leader_pos_dict], dtype=np.float32) + follower_pos = np.array([follower_pos_dict[name] for name in follower_pos_dict], dtype=np.float32) + + self.leader_tracking_error_queue.append(np.linalg.norm(follower_pos[:-1] - leader_pos[:-1])) + + # [:3, 3] Last column of the transformation matrix corresponds to the xyz translation + leader_ee = self.kinematics.forward_kinematics(leader_pos, frame="gripper_tip")[:3, 3] + follower_ee = self.kinematics.forward_kinematics(follower_pos, frame="gripper_tip")[:3, 3] + + action = np.clip(leader_ee - follower_ee, -self.end_effector_step_sizes, self.end_effector_step_sizes) + # Normalize the action to the range [-1, 1] + action = action / self.end_effector_step_sizes + + if self.use_gripper: + if self.prev_leader_gripper is None: + self.prev_leader_gripper = np.clip( + leader_pos[-1], 0, self.robot_follower.config.max_gripper_pos + ) + + # Get gripper action delta based on leader pose + leader_gripper = leader_pos[-1] + gripper_delta = leader_gripper - self.prev_leader_gripper + + # Normalize by max angle and quantize to {0,1,2} + normalized_delta = gripper_delta / self.robot_follower.config.max_gripper_pos + if normalized_delta >= 0.3: + gripper_action = 2 + elif normalized_delta <= 0.1: + gripper_action = 0 + else: + gripper_action = 1 + + action = np.append(action, gripper_action) + + return action + + def _handle_leader_teleoperation(self): + """ + Handle leader teleoperation in non-intervention mode. + + This method synchronizes the leader robot position with the follower. + """ + + prev_leader_pos_dict = self.robot_leader.bus.sync_read("Present_Position") + prev_leader_pos = np.array( + [prev_leader_pos_dict[name] for name in prev_leader_pos_dict], dtype=np.float32 + ) + + if not self.leader_torque_enabled: + self.robot_leader.bus.sync_write("Torque_Enable", 1) + self.leader_torque_enabled = True + + follower_pos_dict = self.robot_follower.bus.sync_read("Present_Position") + follower_pos = np.array([follower_pos_dict[name] for name in follower_pos_dict], dtype=np.float32) + + goal_pos = {f"{motor}": follower_pos[i] for i, motor in enumerate(self.robot_leader.bus.motors)} + self.robot_leader.bus.sync_write("Goal_Position", goal_pos) + + self.leader_tracking_error_queue.append(np.linalg.norm(follower_pos[:-1] - prev_leader_pos[:-1])) + + def step(self, action): + """ + Execute a step with possible human intervention. + + Args: + action: The action to take in the environment. + + Returns: + Tuple of (observation, reward, terminated, truncated, info). 
+ """ + is_intervention = self._check_intervention() + + # NOTE: + if is_intervention: + action = self._handle_intervention(action) + else: + self._handle_leader_teleoperation() + + # NOTE: + obs, reward, terminated, truncated, info = self.env.step(action) + + # Add intervention info + info["is_intervention"] = is_intervention + info["action_intervention"] = action if is_intervention else None + + self.prev_leader_gripper = np.clip( + self.robot_leader.bus.sync_read("Present_Position")["gripper"], + 0, + self.robot_follower.config.max_gripper_pos, + ) + + # Check for success or manual termination + success = self.keyboard_events["episode_success"] + terminated = terminated or self.keyboard_events["episode_end"] or success + + if success: + reward = 1.0 + logging.info("Episode ended successfully with reward 1.0") + + return obs, reward, terminated, truncated, info + + def reset(self, **kwargs): + """ + Reset the environment and intervention state. + + Args: + **kwargs: Keyword arguments passed to the wrapped environment's reset. + + Returns: + The initial observation and info. + """ + self.keyboard_events = dict.fromkeys(self.keyboard_events, False) + self.leader_tracking_error_queue.clear() + return super().reset(**kwargs) + + def close(self): + """ + Clean up resources, including stopping keyboard listener. + + Returns: + Result of closing the wrapped environment. + """ + if hasattr(self, "listener") and self.listener is not None: + self.listener.stop() + return self.env.close() + + +class GearedLeaderControlWrapper(BaseLeaderControlWrapper): + """ + Wrapper that enables manual intervention via keyboard. + + This wrapper extends the BaseLeaderControlWrapper to allow explicit toggling + of human intervention mode with keyboard controls. + """ + + def _init_keyboard_events(self): + """ + Initialize keyboard events including human intervention flag. + + Extends the base class dictionary with an additional flag for tracking + intervention state toggled by keyboard. + """ + super()._init_keyboard_events() + self.keyboard_events["human_intervention_step"] = False + + def _handle_key_press(self, key, keyboard): + """ + Handle key presses including space for intervention toggle. + + Args: + key: The key that was pressed. + keyboard: The keyboard module with key definitions. + + Extends the base handler to respond to space key for toggling intervention. + """ + super()._handle_key_press(key, keyboard) + if key == keyboard.Key.space: + if not self.keyboard_events["human_intervention_step"]: + logging.info( + "Space key pressed. Human intervention required.\n" + "Place the leader in similar pose to the follower and press space again." + ) + self.keyboard_events["human_intervention_step"] = True + log_say("Human intervention step.", play_sounds=True) + else: + self.keyboard_events["human_intervention_step"] = False + logging.info("Space key pressed for a second time.\nContinuing with policy actions.") + log_say("Continuing with policy actions.", play_sounds=True) + + def _check_intervention(self): + """ + Check if human intervention is active based on keyboard toggle. + + Returns: + Boolean indicating whether intervention mode is active. + """ + return self.keyboard_events["human_intervention_step"] + + +class GearedLeaderAutomaticControlWrapper(BaseLeaderControlWrapper): + """ + Wrapper with automatic intervention based on error thresholds. + + This wrapper monitors the error between leader and follower positions + and automatically triggers intervention when error exceeds thresholds. 
+ """ + + def __init__( + self, + env, + teleop_device, + end_effector_step_sizes, + use_gripper=False, + intervention_threshold=10.0, + release_threshold=1e-2, + ): + """ + Initialize the automatic intervention wrapper. + + Args: + env: The environment to wrap. + teleop_device: The teleoperation device. + use_gripper: Whether to include gripper control. + intervention_threshold: Error threshold to trigger intervention. + release_threshold: Error threshold to release intervention. + queue_size: Number of error measurements to track for smoothing. + """ + super().__init__(env, teleop_device, end_effector_step_sizes, use_gripper=use_gripper) + + # Error tracking parameters + self.intervention_threshold = intervention_threshold # Threshold to trigger intervention + self.release_threshold = release_threshold # Threshold to release intervention + self.is_intervention_active = False + self.start_time = time.perf_counter() + + def _check_intervention(self): + """ + Determine if intervention should occur based on the rate of change of leader-follower error in end_effector space. + + This method monitors the rate of change of leader-follower error in end_effector space + and automatically triggers intervention when the rate of change exceeds + the intervention threshold, releasing when it falls below the release threshold. + + Returns: + Boolean indicating whether intervention should be active. + """ + + # Condition for starting the intervention + # If the error in teleoperation is too high, that means the a user has grasped the leader robot and he wants to take over + if ( + not self.is_intervention_active + and len(self.leader_tracking_error_queue) == self.leader_tracking_error_queue.maxlen + and np.var(list(self.leader_tracking_error_queue)[-2:]) > self.intervention_threshold + ): + self.is_intervention_active = True + self.leader_tracking_error_queue.clear() + log_say("Intervention started", play_sounds=True) + return True + + # Track the error over time in leader_tracking_error_queue + # If the variance of the tracking error is too low, that means the user has let go of the leader robot and the intervention is over + if ( + self.is_intervention_active + and len(self.leader_tracking_error_queue) == self.leader_tracking_error_queue.maxlen + and np.var(self.leader_tracking_error_queue) < self.release_threshold + ): + self.is_intervention_active = False + self.leader_tracking_error_queue.clear() + log_say("Intervention ended", play_sounds=True) + return False + + # If not change has happened that merits a change in the intervention state, return the current state + return self.is_intervention_active + + def reset(self, **kwargs): + """ + Reset error tracking on environment reset. + + Args: + **kwargs: Keyword arguments passed to the wrapped environment's reset. + + Returns: + The initial observation and info. + """ + self.is_intervention_active = False + return super().reset(**kwargs) + + +class GamepadControlWrapper(gym.Wrapper): + """ + Wrapper that allows controlling a gym environment with a gamepad. + + This wrapper intercepts the step method and allows human input via gamepad + to override the agent's actions when desired. + """ + + def __init__( + self, + env, + teleop_device, # Accepts an instantiated teleoperator + use_gripper=False, # This should align with teleop_device's config + auto_reset=False, + ): + """ + Initialize the gamepad controller wrapper. + + Args: + env: The environment to wrap. + teleop_device: The instantiated teleoperation device (e.g., GamepadTeleop). 
+ use_gripper: Whether to include gripper control (should match teleop_device.config.use_gripper). + auto_reset: Whether to auto reset the environment when episode ends. + """ + super().__init__(env) + + self.teleop_device = teleop_device + # Ensure the teleop_device is connected if it has a connect method + if hasattr(self.teleop_device, "connect") and not self.teleop_device.is_connected: + self.teleop_device.connect() + + # self.controller attribute is removed + + self.auto_reset = auto_reset + # use_gripper from args should ideally match teleop_device.config.use_gripper + # For now, we use the one passed, but it can lead to inconsistency if not set correctly from config + self.use_gripper = use_gripper + + logging.info("Gamepad control wrapper initialized with provided teleop_device.") + print( + "Gamepad controls (managed by the provided teleop_device - specific button mappings might vary):" + ) + print(" Left analog stick: Move in X-Y plane") + print(" Right analog stick: Move in Z axis (up/down)") + print(" X/Square button: End episode (FAILURE)") + print(" Y/Triangle button: End episode (SUCCESS)") + print(" B/Circle button: Exit program") + + def get_gamepad_action( + self, + ) -> tuple[bool, np.ndarray, bool, bool, bool]: + """ + Get the current action from the gamepad if any input is active. + + Returns: + Tuple containing: + - is_active: Whether gamepad input is active (from teleop_device.gamepad.should_intervene()) + - action: The action derived from gamepad input (from teleop_device.get_action()) + - terminate_episode: Whether episode termination was requested + - success: Whether episode success was signaled + - rerecord_episode: Whether episode rerecording was requested + """ + if not hasattr(self.teleop_device, "gamepad") or self.teleop_device.gamepad is None: + raise AttributeError( + "teleop_device does not have a 'gamepad' attribute or it is None. Expected for GamepadControlWrapper." + ) + + # Get status flags from the underlying gamepad controller within the teleop_device + self.teleop_device.gamepad.update() # Ensure gamepad state is fresh + intervention_is_active = self.teleop_device.gamepad.should_intervene() + episode_end_status = self.teleop_device.gamepad.get_episode_end_status() + + terminate_episode = episode_end_status is not None + success = episode_end_status == "success" + rerecord_episode = episode_end_status == "rerecord_episode" + + # Get the action dictionary from the teleop_device + action_dict = self.teleop_device.get_action() + + # Convert action_dict to numpy array based on expected structure + # Order: delta_x, delta_y, delta_z, gripper (if use_gripper) + action_list = [action_dict["delta_x"], action_dict["delta_y"], action_dict["delta_z"]] + if self.use_gripper: + # GamepadTeleop returns gripper action as 0 (close), 1 (stay), 2 (open) + # This needs to be consistent with what EEActionWrapper expects if it's used downstream + # EEActionWrapper for gripper typically expects 0.0 (closed) to 2.0 (open) + # For now, we pass the direct value from GamepadTeleop, ensure downstream compatibility. + gripper_val = action_dict.get("gripper", 1.0) # Default to 1.0 (stay) if not present + action_list.append(float(gripper_val)) + + gamepad_action_np = np.array(action_list, dtype=np.float32) + + return ( + intervention_is_active, + gamepad_action_np, + terminate_episode, + success, + rerecord_episode, + ) + + def step(self, action): + """ + Step the environment, using gamepad input to override actions when active. + + Args: + action: Original action from agent. 
+ + Returns: + Tuple of (observation, reward, terminated, truncated, info). + """ + # Get gamepad state and action + ( + is_intervention, + gamepad_action, + terminate_episode, + success, + rerecord_episode, + ) = self.get_gamepad_action() + + # Update episode ending state if requested + if terminate_episode: + logging.info(f"Episode manually ended: {'SUCCESS' if success else 'FAILURE'}") + + # Only override the action if gamepad is active + action = gamepad_action if is_intervention else action + + # Step the environment + obs, reward, terminated, truncated, info = self.env.step(action) + + # Add episode ending if requested via gamepad + terminated = terminated or truncated or terminate_episode + + if success: + reward = 1.0 + logging.info("Episode ended successfully with reward 1.0") + + if isinstance(action, np.ndarray): + action = torch.from_numpy(action) + + info["is_intervention"] = is_intervention + # The original `BaseLeaderControlWrapper` puts `action_intervention` in info. + # For Gamepad, if intervention, `gamepad_action` is the intervention. + # If not intervention, policy's action is `action`. + # For consistency, let's store the *human's* action if intervention occurred. + info["action_intervention"] = action + + info["rerecord_episode"] = rerecord_episode + + # If episode ended, reset the state + if terminated or truncated: + # Add success/failure information to info dict + info["next.success"] = success + + # Auto reset if configured + if self.auto_reset: + obs, reset_info = self.reset() + info.update(reset_info) + + return obs, reward, terminated, truncated, info + + def close(self): + """ + Clean up resources when environment closes. + + Returns: + Result of closing the wrapped environment. + """ + if hasattr(self.teleop_device, "disconnect"): + self.teleop_device.disconnect() + + # Call the parent close method + return self.env.close() + + +class GymHilDeviceWrapper(gym.Wrapper): + def __init__(self, env, device="cpu"): + super().__init__(env) + self.device = device + + def step(self, action): + obs, reward, terminated, truncated, info = self.env.step(action) + for k in obs: + obs[k] = obs[k].to(self.device) + if "action_intervention" in info: + # NOTE: This is a hack to ensure the action intervention is a float32 tensor and supported on MPS device + info["action_intervention"] = info["action_intervention"].astype(np.float32) + info["action_intervention"] = torch.from_numpy(info["action_intervention"]).to(self.device) + return obs, reward, terminated, truncated, info + + def reset(self, *, seed: int | None = None, options: dict[str, Any] | None = None): + obs, info = self.env.reset(seed=seed, options=options) + for k in obs: + obs[k] = obs[k].to(self.device) + if "action_intervention" in info: + # NOTE: This is a hack to ensure the action intervention is a float32 tensor and supported on MPS device + info["action_intervention"] = info["action_intervention"].astype(np.float32) + info["action_intervention"] = torch.from_numpy(info["action_intervention"]).to(self.device) + return obs, info + + +class GymHilObservationProcessorWrapper(gym.ObservationWrapper): + def __init__(self, env: gym.Env): + super().__init__(env) + prev_space = self.observation_space + new_space = {} + + for key in prev_space: + if "pixels" in key: + for k in prev_space["pixels"]: + new_space[f"observation.images.{k}"] = gym.spaces.Box( + 0.0, 255.0, shape=(3, 128, 128), dtype=np.uint8 + ) + + if key == "agent_pos": + new_space["observation.state"] = prev_space["agent_pos"] + + self.observation_space = 
gym.spaces.Dict(new_space) + + def observation(self, observation: dict[str, Any]) -> dict[str, Any]: + return preprocess_observation(observation) + + +########################################################### +# Factory functions +########################################################### + + +def make_robot_env(cfg: EnvConfig) -> gym.Env: + """ + Factory function to create a robot environment. + + This function builds a robot environment with all necessary wrappers + based on the provided configuration. + + Args: + cfg: Configuration object containing environment parameters. + + Returns: + A gym environment with all necessary wrappers applied. + """ + if cfg.type == "hil": + import gym_hil # noqa: F401 + + # TODO (azouitine) + env = gym.make( + f"gym_hil/{cfg.task}", + image_obs=True, + render_mode="human", + use_gripper=cfg.wrapper.use_gripper, + gripper_penalty=cfg.wrapper.gripper_penalty, + ) + env = GymHilObservationProcessorWrapper(env=env) + env = GymHilDeviceWrapper(env=env, device=cfg.device) + env = BatchCompatibleWrapper(env=env) + env = TorchActionWrapper(env=env, device=cfg.device) + return env + + if not hasattr(cfg, "robot") or not hasattr(cfg, "teleop"): + raise ValueError( + "Configuration for 'gym_manipulator' must be HILSerlRobotEnvConfig with robot and teleop." + ) + + if cfg.robot is None: + raise ValueError("RobotConfig (cfg.robot) must be provided for gym_manipulator environment.") + robot = make_robot_from_config(cfg.robot) + + teleop_device = make_teleoperator_from_config(cfg.teleop) + teleop_device.connect() + + # Create base environment + env = RobotEnv( + robot=robot, + use_gripper=cfg.wrapper.use_gripper, + display_cameras=cfg.wrapper.display_cameras if cfg.wrapper else False, + ) + + # Add observation and image processing + if cfg.wrapper: + if cfg.wrapper.add_joint_velocity_to_observation: + env = AddJointVelocityToObservation(env=env, fps=cfg.fps) + if cfg.wrapper.add_current_to_observation: + env = AddCurrentToObservation(env=env) + if cfg.wrapper.add_ee_pose_to_observation: + env = EEObservationWrapper(env=env, ee_pose_limits=robot.end_effector_bounds) + + env = ConvertToLeRobotObservation(env=env, device=cfg.device) + + if cfg.wrapper and cfg.wrapper.crop_params_dict is not None: + env = ImageCropResizeWrapper( + env=env, + crop_params_dict=cfg.wrapper.crop_params_dict, + resize_size=cfg.wrapper.resize_size, + ) + + # Add reward computation and control wrappers + reward_classifier = init_reward_classifier(cfg) + if reward_classifier is not None: + env = RewardWrapper(env=env, reward_classifier=reward_classifier, device=cfg.device) + + env = TimeLimitWrapper(env=env, control_time_s=cfg.wrapper.control_time_s, fps=cfg.fps) + if cfg.wrapper.use_gripper and cfg.wrapper.gripper_penalty is not None: + env = GripperPenaltyWrapper( + env=env, + penalty=cfg.wrapper.gripper_penalty, + ) + + # Control mode specific wrappers + control_mode = cfg.wrapper.control_mode + if control_mode == "gamepad": + assert isinstance(teleop_device, GamepadTeleop), ( + "teleop_device must be an instance of GamepadTeleop for gamepad control mode" + ) + env = GamepadControlWrapper( + env=env, + teleop_device=teleop_device, + use_gripper=cfg.wrapper.use_gripper, + ) + elif control_mode == "leader": + env = GearedLeaderControlWrapper( + env=env, + teleop_device=teleop_device, + end_effector_step_sizes=cfg.robot.end_effector_step_sizes, + use_gripper=cfg.wrapper.use_gripper, + ) + elif control_mode == "leader_automatic": + env = GearedLeaderAutomaticControlWrapper( + env=env, + 
teleop_device=teleop_device, + end_effector_step_sizes=cfg.robot.end_effector_step_sizes, + use_gripper=cfg.wrapper.use_gripper, + ) + else: + raise ValueError(f"Invalid control mode: {control_mode}") + + env = ResetWrapper( + env=env, + reset_pose=cfg.wrapper.fixed_reset_joint_positions, + reset_time_s=cfg.wrapper.reset_time_s, + ) + + env = BatchCompatibleWrapper(env=env) + env = TorchActionWrapper(env=env, device=cfg.device) + + return env + + +def init_reward_classifier(cfg): + """ + Load a reward classifier policy from a pretrained path if configured. + + Args: + cfg: The environment configuration containing classifier paths. + + Returns: + The loaded classifier model or None if not configured. + """ + if cfg.reward_classifier_pretrained_path is None: + return None + + from lerobot.common.policies.sac.reward_model.modeling_classifier import Classifier + + # Get device from config or default to CUDA + device = getattr(cfg, "device", "cpu") + + # Load the classifier directly using from_pretrained + classifier = Classifier.from_pretrained( + pretrained_name_or_path=cfg.reward_classifier_pretrained_path, + ) + + # Ensure model is on the correct device + classifier.to(device) + classifier.eval() # Set to evaluation mode + + return classifier + + +########################################################### +# Record and replay functions +########################################################### + + +def record_dataset(env, policy, cfg): + """ + Record a dataset of robot interactions using either a policy or teleop. + + This function runs episodes in the environment and records the observations, + actions, and results for dataset creation. + + Args: + env: The environment to record from. + policy: Optional policy to generate actions (if None, uses teleop). + cfg: Configuration object containing recording parameters like: + - repo_id: Repository ID for dataset storage + - dataset_root: Local root directory for dataset + - num_episodes: Number of episodes to record + - fps: Frames per second for recording + - push_to_hub: Whether to push dataset to Hugging Face Hub + - task: Name/description of the task being recorded + - number_of_steps_after_success: Number of additional steps to continue recording after + a success (reward=1) is detected. This helps collect + more positive examples for reward classifier training. 
+ """ + from lerobot.common.datasets.lerobot_dataset import LeRobotDataset + + # Setup initial action (zero action if using teleop) + action = env.action_space.sample() * 0.0 + + action_names = ["delta_x_ee", "delta_y_ee", "delta_z_ee"] + if cfg.wrapper.use_gripper: + action_names.append("gripper_delta") + + # Configure dataset features based on environment spaces + features = { + "observation.state": { + "dtype": "float32", + "shape": env.observation_space["observation.state"].shape, + "names": None, + }, + "action": { + "dtype": "float32", + "shape": (len(action_names),), + "names": action_names, + }, + "next.reward": {"dtype": "float32", "shape": (1,), "names": None}, + "next.done": {"dtype": "bool", "shape": (1,), "names": None}, + "complementary_info.discrete_penalty": { + "dtype": "float32", + "shape": (1,), + "names": ["discrete_penalty"], + }, + } + + # Add image features + for key in env.observation_space: + if "image" in key: + features[key] = { + "dtype": "video", + "shape": env.observation_space[key].shape, + "names": ["channels", "height", "width"], + } + + # Create dataset + dataset = LeRobotDataset.create( + cfg.repo_id, + cfg.fps, + root=cfg.dataset_root, + use_videos=True, + image_writer_threads=4, + image_writer_processes=0, + features=features, + ) + + # Record episodes + episode_index = 0 + recorded_action = None + while episode_index < cfg.num_episodes: + obs, _ = env.reset() + start_episode_t = time.perf_counter() + log_say(f"Recording episode {episode_index}", play_sounds=True) + + # Track success state collection + success_detected = False + success_steps_collected = 0 + + # Run episode steps + while time.perf_counter() - start_episode_t < cfg.wrapper.control_time_s: + start_loop_t = time.perf_counter() + + # Get action from policy if available + if cfg.pretrained_policy_name_or_path is not None: + action = policy.select_action(obs) + + # Step environment + obs, reward, terminated, truncated, info = env.step(action) + + # Check if episode needs to be rerecorded + if info.get("rerecord_episode", False): + break + + # For teleop, get action from intervention + recorded_action = { + "action": info["action_intervention"].cpu().squeeze(0).float() if policy is None else action + } + + # Process observation for dataset + obs_processed = {k: v.cpu().squeeze(0).float() for k, v in obs.items()} + + # Check if we've just detected success + if reward == 1.0 and not success_detected: + success_detected = True + logging.info("Success detected! 
Collecting additional success states.") + + # Add frame to dataset - continue marking as success even during extra collection steps + frame = {**obs_processed, **recorded_action} + + # If we're in the success collection phase, keep marking rewards as 1.0 + if success_detected: + frame["next.reward"] = np.array([1.0], dtype=np.float32) + else: + frame["next.reward"] = np.array([reward], dtype=np.float32) + + # Only mark as done if we're truly done (reached end or collected enough success states) + really_done = terminated or truncated + if success_detected: + success_steps_collected += 1 + really_done = success_steps_collected >= cfg.number_of_steps_after_success + + frame["next.done"] = np.array([really_done], dtype=bool) + frame["complementary_info.discrete_penalty"] = torch.tensor( + [info.get("discrete_penalty", 0.0)], dtype=torch.float32 + ) + dataset.add_frame(frame, task=cfg.task) + + # Maintain consistent timing + if cfg.fps: + dt_s = time.perf_counter() - start_loop_t + busy_wait(1 / cfg.fps - dt_s) + + # Check if we should end the episode + if (terminated or truncated) and not success_detected: + # Regular termination without success + break + elif success_detected and success_steps_collected >= cfg.number_of_steps_after_success: + # We've collected enough success states + logging.info(f"Collected {success_steps_collected} additional success states") + break + + # Handle episode recording + if info.get("rerecord_episode", False): + dataset.clear_episode_buffer() + logging.info(f"Re-recording episode {episode_index}") + continue + + dataset.save_episode() + episode_index += 1 + + # Finalize dataset + # dataset.consolidate(run_compute_stats=True) + if cfg.push_to_hub: + dataset.push_to_hub() + + +def replay_episode(env, cfg): + """ + Replay a recorded episode in the environment. + + This function loads actions from a previously recorded episode + and executes them in the environment. + + Args: + env: The environment to replay in. + cfg: Configuration object containing replay parameters: + - repo_id: Repository ID for dataset + - dataset_root: Local root directory for dataset + - episode: Episode ID to replay + """ + from lerobot.common.datasets.lerobot_dataset import LeRobotDataset + + dataset = LeRobotDataset(cfg.repo_id, root=cfg.dataset_root, episodes=[cfg.episode]) + env.reset() + + actions = dataset.hf_dataset.select_columns("action") + + for idx in range(dataset.num_frames): + start_episode_t = time.perf_counter() + + action = actions[idx]["action"] + env.step(action) + + dt_s = time.perf_counter() - start_episode_t + busy_wait(1 / 10 - dt_s) + + +@parser.wrap() +def main(cfg: EnvConfig): + """Main entry point for the robot environment script. + + This function runs the robot environment in one of several modes + based on the provided configuration. + + Args: + cfg: Configuration object defining the run parameters, + including mode (record, replay, random) and other settings. + """ + env = make_robot_env(cfg) + + if cfg.mode == "record": + policy = None + if cfg.pretrained_policy_name_or_path is not None: + from lerobot.common.policies.sac.modeling_sac import SACPolicy + + policy = SACPolicy.from_pretrained(cfg.pretrained_policy_name_or_path) + policy.to(cfg.device) + policy.eval() + + record_dataset( + env, + policy=policy, + cfg=cfg, + ) + exit() + + if cfg.mode == "replay": + replay_episode( + env, + cfg=cfg, + ) + exit() + + env.reset() + + # Initialize the smoothed action as a random sample. 
+ smoothed_action = env.action_space.sample() * 0.0 + + # Smoothing coefficient (alpha) defines how much of the new random sample to mix in. + # A value close to 0 makes the trajectory very smooth (slow to change), while a value close to 1 is less smooth. + alpha = 1.0 + + num_episode = 0 + successes = [] + while num_episode < 10: + start_loop_s = time.perf_counter() + # Sample a new random action from the robot's action space. + new_random_action = env.action_space.sample() + # Update the smoothed action using an exponential moving average. + smoothed_action = alpha * new_random_action + (1 - alpha) * smoothed_action + + # Execute the step: wrap the NumPy action in a torch tensor. + obs, reward, terminated, truncated, info = env.step(smoothed_action) + if terminated or truncated: + successes.append(reward) + env.reset() + num_episode += 1 + + dt_s = time.perf_counter() - start_loop_s + busy_wait(1 / cfg.fps - dt_s) + + logging.info(f"Success after 20 steps {successes}") + logging.info(f"success rate {sum(successes) / len(successes)}") + + +if __name__ == "__main__": + main() diff --git a/lerobot/scripts/rl/learner.py b/lerobot/scripts/rl/learner.py new file mode 100644 index 0000000000..2d2c3755a9 --- /dev/null +++ b/lerobot/scripts/rl/learner.py @@ -0,0 +1,1206 @@ +# !/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Learner server runner for distributed HILSerl robot policy training. + +This script implements the learner component of the distributed HILSerl architecture. +It initializes the policy network, maintains replay buffers, and updates +the policy based on transitions received from the actor server. + +Examples of usage: + +- Start a learner server for training: +```bash +python lerobot/scripts/rl/learner.py --config_path lerobot/configs/train_config_hilserl_so100.json +``` + +**NOTE**: Start the learner server before launching the actor server. The learner opens a gRPC server +to communicate with actors. + +**NOTE**: Training progress can be monitored through Weights & Biases if wandb.enable is set to true +in your configuration. + +**WORKFLOW**: +1. Create training configuration with proper policy, dataset, and environment settings +2. Start this learner server with the configuration +3. Start an actor server with the same configuration +4. 
Monitor training progress through wandb dashboard + +For more details on the complete HILSerl training workflow, see: +https://github.com/michel-aractingi/lerobot-hilserl-guide +""" + +import logging +import os +import shutil +import time +from concurrent.futures import ThreadPoolExecutor +from pathlib import Path +from pprint import pformat + +import grpc +import torch +from termcolor import colored +from torch import nn +from torch.multiprocessing import Queue +from torch.optim.optimizer import Optimizer + +from lerobot.common.cameras import opencv # noqa: F401 +from lerobot.common.constants import ( + CHECKPOINTS_DIR, + LAST_CHECKPOINT_LINK, + PRETRAINED_MODEL_DIR, + TRAINING_STATE_DIR, +) +from lerobot.common.datasets.factory import make_dataset +from lerobot.common.datasets.lerobot_dataset import LeRobotDataset +from lerobot.common.policies.factory import make_policy +from lerobot.common.policies.sac.modeling_sac import SACPolicy +from lerobot.common.robots import so100_follower # noqa: F401 +from lerobot.common.teleoperators import gamepad, so100_leader # noqa: F401 +from lerobot.common.transport import services_pb2_grpc +from lerobot.common.transport.utils import ( + bytes_to_python_object, + bytes_to_transitions, + state_to_bytes, +) +from lerobot.common.utils.buffer import ReplayBuffer, concatenate_batch_transitions +from lerobot.common.utils.process import ProcessSignalHandler +from lerobot.common.utils.random_utils import set_seed +from lerobot.common.utils.train_utils import ( + get_step_checkpoint_dir, + save_checkpoint, + update_last_checkpoint, +) +from lerobot.common.utils.train_utils import ( + load_training_state as utils_load_training_state, +) +from lerobot.common.utils.transition import move_state_dict_to_device, move_transition_to_device +from lerobot.common.utils.utils import ( + format_big_number, + get_safe_torch_device, + init_logging, +) +from lerobot.common.utils.wandb_utils import WandBLogger +from lerobot.configs import parser +from lerobot.configs.train import TrainRLServerPipelineConfig +from lerobot.scripts.rl import learner_service + +LOG_PREFIX = "[LEARNER]" + + +################################################# +# MAIN ENTRY POINTS AND CORE ALGORITHM FUNCTIONS # +################################################# + + +@parser.wrap() +def train_cli(cfg: TrainRLServerPipelineConfig): + if not use_threads(cfg): + import torch.multiprocessing as mp + + mp.set_start_method("spawn") + + # Use the job_name from the config + train( + cfg, + job_name=cfg.job_name, + ) + + logging.info("[LEARNER] train_cli finished") + + +def train(cfg: TrainRLServerPipelineConfig, job_name: str | None = None): + """ + Main training function that initializes and runs the training process. + + Args: + cfg (TrainRLServerPipelineConfig): The training configuration + job_name (str | None, optional): Job name for logging. Defaults to None. 
+ """ + + cfg.validate() + + if job_name is None: + job_name = cfg.job_name + + if job_name is None: + raise ValueError("Job name must be specified either in config or as a parameter") + + display_pid = False + if not use_threads(cfg): + display_pid = True + + # Create logs directory to ensure it exists + log_dir = os.path.join(cfg.output_dir, "logs") + os.makedirs(log_dir, exist_ok=True) + log_file = os.path.join(log_dir, f"learner_{job_name}.log") + + # Initialize logging with explicit log file + init_logging(log_file=log_file, display_pid=display_pid) + logging.info(f"Learner logging initialized, writing to {log_file}") + logging.info(pformat(cfg.to_dict())) + + # Setup WandB logging if enabled + if cfg.wandb.enable and cfg.wandb.project: + from lerobot.common.utils.wandb_utils import WandBLogger + + wandb_logger = WandBLogger(cfg) + else: + wandb_logger = None + logging.info(colored("Logs will be saved locally.", "yellow", attrs=["bold"])) + + # Handle resume logic + cfg = handle_resume_logic(cfg) + + set_seed(seed=cfg.seed) + + torch.backends.cudnn.benchmark = True + torch.backends.cuda.matmul.allow_tf32 = True + + is_threaded = use_threads(cfg) + shutdown_event = ProcessSignalHandler(is_threaded, display_pid=display_pid).shutdown_event + + start_learner_threads( + cfg=cfg, + wandb_logger=wandb_logger, + shutdown_event=shutdown_event, + ) + + +def start_learner_threads( + cfg: TrainRLServerPipelineConfig, + wandb_logger: WandBLogger | None, + shutdown_event: any, # Event, +) -> None: + """ + Start the learner threads for training. + + Args: + cfg (TrainRLServerPipelineConfig): Training configuration + wandb_logger (WandBLogger | None): Logger for metrics + shutdown_event: Event to signal shutdown + """ + # Create multiprocessing queues + transition_queue = Queue() + interaction_message_queue = Queue() + parameters_queue = Queue() + + concurrency_entity = None + + if use_threads(cfg): + from threading import Thread + + concurrency_entity = Thread + else: + from torch.multiprocessing import Process + + concurrency_entity = Process + + communication_process = concurrency_entity( + target=start_learner, + args=( + parameters_queue, + transition_queue, + interaction_message_queue, + shutdown_event, + cfg, + ), + daemon=True, + ) + communication_process.start() + + add_actor_information_and_train( + cfg=cfg, + wandb_logger=wandb_logger, + shutdown_event=shutdown_event, + transition_queue=transition_queue, + interaction_message_queue=interaction_message_queue, + parameters_queue=parameters_queue, + ) + logging.info("[LEARNER] Training process stopped") + + logging.info("[LEARNER] Closing queues") + transition_queue.close() + interaction_message_queue.close() + parameters_queue.close() + + communication_process.join() + logging.info("[LEARNER] Communication process joined") + + logging.info("[LEARNER] join queues") + transition_queue.cancel_join_thread() + interaction_message_queue.cancel_join_thread() + parameters_queue.cancel_join_thread() + + logging.info("[LEARNER] queues closed") + + +################################################# +# Core algorithm functions # +################################################# + + +def add_actor_information_and_train( + cfg: TrainRLServerPipelineConfig, + wandb_logger: WandBLogger | None, + shutdown_event: any, # Event, + transition_queue: Queue, + interaction_message_queue: Queue, + parameters_queue: Queue, +): + """ + Handles data transfer from the actor to the learner, manages training updates, + and logs training progress in an online 
reinforcement learning setup.
+
+    This function continuously:
+    - Transfers transitions from the actor to the replay buffer.
+    - Logs received interaction messages.
+    - Ensures training begins only when the replay buffer has a sufficient number of transitions.
+    - Samples batches from the replay buffer and performs multiple critic updates.
+    - Periodically updates the actor, critic, and temperature optimizers.
+    - Logs training statistics, including loss values and optimization frequency.
+
+    NOTE: This function doesn't have a single responsibility; it should be split into multiple functions
+    in the future. The reason we did it this way is Python's GIL: with multiple threads, performance is
+    roughly divided by 200, so we need a single thread that does all the work.
+
+    Args:
+        cfg (TrainRLServerPipelineConfig): Configuration object containing hyperparameters.
+        wandb_logger (WandBLogger | None): Logger for tracking training progress.
+        shutdown_event (Event): Event to signal shutdown.
+        transition_queue (Queue): Queue for receiving transitions from the actor.
+        interaction_message_queue (Queue): Queue for receiving interaction messages from the actor.
+        parameters_queue (Queue): Queue for sending policy parameters to the actor.
+    """
+    # Extract all configuration variables at the beginning; this improves speed by about 7%
+    device = get_safe_torch_device(try_device=cfg.policy.device, log=True)
+    storage_device = get_safe_torch_device(try_device=cfg.policy.storage_device)
+    clip_grad_norm_value = cfg.policy.grad_clip_norm
+    online_step_before_learning = cfg.policy.online_step_before_learning
+    utd_ratio = cfg.policy.utd_ratio
+    fps = cfg.env.fps
+    log_freq = cfg.log_freq
+    save_freq = cfg.save_freq
+    policy_update_freq = cfg.policy.policy_update_freq
+    policy_parameters_push_frequency = cfg.policy.actor_learner_config.policy_parameters_push_frequency
+    saving_checkpoint = cfg.save_checkpoint
+    online_steps = cfg.policy.online_steps
+    async_prefetch = cfg.policy.async_prefetch
+
+    # Initialize logging for multiprocessing
+    if not use_threads(cfg):
+        log_dir = os.path.join(cfg.output_dir, "logs")
+        os.makedirs(log_dir, exist_ok=True)
+        log_file = os.path.join(log_dir, f"learner_train_process_{os.getpid()}.log")
+        init_logging(log_file=log_file, display_pid=True)
+        logging.info("Initialized logging for actor information and training process")
+
+    logging.info("Initializing policy")
+
+    policy: SACPolicy = make_policy(
+        cfg=cfg.policy,
+        env_cfg=cfg.env,
+    )
+
+    assert isinstance(policy, nn.Module)
+
+    policy.train()
+
+    push_actor_policy_to_queue(parameters_queue=parameters_queue, policy=policy)
+
+    last_time_policy_pushed = time.time()
+
+    optimizers, lr_scheduler = make_optimizers_and_scheduler(cfg=cfg, policy=policy)
+
+    # If we are resuming, we need to load the training state
+    resume_optimization_step, resume_interaction_step = load_training_state(cfg=cfg, optimizers=optimizers)
+
+    log_training_info(cfg=cfg, policy=policy)
+
+    replay_buffer = initialize_replay_buffer(cfg, device, storage_device)
+    batch_size = cfg.batch_size
+    offline_replay_buffer = None
+
+    if cfg.dataset is not None:
+        offline_replay_buffer = initialize_offline_replay_buffer(
+            cfg=cfg,
+            device=device,
+            storage_device=storage_device,
+        )
+        batch_size: int = batch_size // 2  # We will sample from both replay buffers
+
+    logging.info("Starting learner thread")
+    interaction_message = None
+    optimization_step = resume_optimization_step if resume_optimization_step is not None else 0
+    interaction_step_shift
= resume_interaction_step if resume_interaction_step is not None else 0 + + dataset_repo_id = None + if cfg.dataset is not None: + dataset_repo_id = cfg.dataset.repo_id + + # Initialize iterators + online_iterator = None + offline_iterator = None + + # NOTE: THIS IS THE MAIN LOOP OF THE LEARNER + while True: + # Exit the training loop if shutdown is requested + if shutdown_event is not None and shutdown_event.is_set(): + logging.info("[LEARNER] Shutdown signal received. Exiting...") + break + + # Process all available transitions to the replay buffer, send by the actor server + process_transitions( + transition_queue=transition_queue, + replay_buffer=replay_buffer, + offline_replay_buffer=offline_replay_buffer, + device=device, + dataset_repo_id=dataset_repo_id, + shutdown_event=shutdown_event, + ) + + # Process all available interaction messages sent by the actor server + interaction_message = process_interaction_messages( + interaction_message_queue=interaction_message_queue, + interaction_step_shift=interaction_step_shift, + wandb_logger=wandb_logger, + shutdown_event=shutdown_event, + ) + + # Wait until the replay buffer has enough samples to start training + if len(replay_buffer) < online_step_before_learning: + continue + + if online_iterator is None: + online_iterator = replay_buffer.get_iterator( + batch_size=batch_size, async_prefetch=async_prefetch, queue_size=2 + ) + + if offline_replay_buffer is not None and offline_iterator is None: + offline_iterator = offline_replay_buffer.get_iterator( + batch_size=batch_size, async_prefetch=async_prefetch, queue_size=2 + ) + + time_for_one_optimization_step = time.time() + for _ in range(utd_ratio - 1): + # Sample from the iterators + batch = next(online_iterator) + + if dataset_repo_id is not None: + batch_offline = next(offline_iterator) + batch = concatenate_batch_transitions( + left_batch_transitions=batch, right_batch_transition=batch_offline + ) + + actions = batch["action"] + rewards = batch["reward"] + observations = batch["state"] + next_observations = batch["next_state"] + done = batch["done"] + check_nan_in_transition(observations=observations, actions=actions, next_state=next_observations) + + observation_features, next_observation_features = get_observation_features( + policy=policy, observations=observations, next_observations=next_observations + ) + + # Create a batch dictionary with all required elements for the forward method + forward_batch = { + "action": actions, + "reward": rewards, + "state": observations, + "next_state": next_observations, + "done": done, + "observation_feature": observation_features, + "next_observation_feature": next_observation_features, + "complementary_info": batch["complementary_info"], + } + + # Use the forward method for critic loss + critic_output = policy.forward(forward_batch, model="critic") + + # Main critic optimization + loss_critic = critic_output["loss_critic"] + optimizers["critic"].zero_grad() + loss_critic.backward() + critic_grad_norm = torch.nn.utils.clip_grad_norm_( + parameters=policy.critic_ensemble.parameters(), max_norm=clip_grad_norm_value + ) + optimizers["critic"].step() + + # Discrete critic optimization (if available) + if policy.config.num_discrete_actions is not None: + discrete_critic_output = policy.forward(forward_batch, model="discrete_critic") + loss_discrete_critic = discrete_critic_output["loss_discrete_critic"] + optimizers["discrete_critic"].zero_grad() + loss_discrete_critic.backward() + discrete_critic_grad_norm = torch.nn.utils.clip_grad_norm_( + 
parameters=policy.discrete_critic.parameters(), max_norm=clip_grad_norm_value + ) + optimizers["discrete_critic"].step() + + # Update target networks (main and discrete) + policy.update_target_networks() + + # Sample for the last update in the UTD ratio + batch = next(online_iterator) + + if dataset_repo_id is not None: + batch_offline = next(offline_iterator) + batch = concatenate_batch_transitions( + left_batch_transitions=batch, right_batch_transition=batch_offline + ) + + actions = batch["action"] + rewards = batch["reward"] + observations = batch["state"] + next_observations = batch["next_state"] + done = batch["done"] + + check_nan_in_transition(observations=observations, actions=actions, next_state=next_observations) + + observation_features, next_observation_features = get_observation_features( + policy=policy, observations=observations, next_observations=next_observations + ) + + # Create a batch dictionary with all required elements for the forward method + forward_batch = { + "action": actions, + "reward": rewards, + "state": observations, + "next_state": next_observations, + "done": done, + "observation_feature": observation_features, + "next_observation_feature": next_observation_features, + } + + critic_output = policy.forward(forward_batch, model="critic") + + loss_critic = critic_output["loss_critic"] + optimizers["critic"].zero_grad() + loss_critic.backward() + critic_grad_norm = torch.nn.utils.clip_grad_norm_( + parameters=policy.critic_ensemble.parameters(), max_norm=clip_grad_norm_value + ).item() + optimizers["critic"].step() + + # Initialize training info dictionary + training_infos = { + "loss_critic": loss_critic.item(), + "critic_grad_norm": critic_grad_norm, + } + + # Discrete critic optimization (if available) + if policy.config.num_discrete_actions is not None: + discrete_critic_output = policy.forward(forward_batch, model="discrete_critic") + loss_discrete_critic = discrete_critic_output["loss_discrete_critic"] + optimizers["discrete_critic"].zero_grad() + loss_discrete_critic.backward() + discrete_critic_grad_norm = torch.nn.utils.clip_grad_norm_( + parameters=policy.discrete_critic.parameters(), max_norm=clip_grad_norm_value + ).item() + optimizers["discrete_critic"].step() + + # Add discrete critic info to training info + training_infos["loss_discrete_critic"] = loss_discrete_critic.item() + training_infos["discrete_critic_grad_norm"] = discrete_critic_grad_norm + + # Actor and temperature optimization (at specified frequency) + if optimization_step % policy_update_freq == 0: + for _ in range(policy_update_freq): + # Actor optimization + actor_output = policy.forward(forward_batch, model="actor") + loss_actor = actor_output["loss_actor"] + optimizers["actor"].zero_grad() + loss_actor.backward() + actor_grad_norm = torch.nn.utils.clip_grad_norm_( + parameters=policy.actor.parameters(), max_norm=clip_grad_norm_value + ).item() + optimizers["actor"].step() + + # Add actor info to training info + training_infos["loss_actor"] = loss_actor.item() + training_infos["actor_grad_norm"] = actor_grad_norm + + # Temperature optimization + temperature_output = policy.forward(forward_batch, model="temperature") + loss_temperature = temperature_output["loss_temperature"] + optimizers["temperature"].zero_grad() + loss_temperature.backward() + temp_grad_norm = torch.nn.utils.clip_grad_norm_( + parameters=[policy.log_alpha], max_norm=clip_grad_norm_value + ).item() + optimizers["temperature"].step() + + # Add temperature info to training info + 
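+                    # NOTE: `policy.temperature` recorded below is the current entropy coefficient
+                    # (presumably derived from `log_alpha`); `policy.update_temperature()` refreshes it
+                    # right after these entries are logged.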
training_infos["loss_temperature"] = loss_temperature.item() + training_infos["temperature_grad_norm"] = temp_grad_norm + training_infos["temperature"] = policy.temperature + + # Update temperature + policy.update_temperature() + + # Push policy to actors if needed + if time.time() - last_time_policy_pushed > policy_parameters_push_frequency: + push_actor_policy_to_queue(parameters_queue=parameters_queue, policy=policy) + last_time_policy_pushed = time.time() + + # Update target networks (main and discrete) + policy.update_target_networks() + + # Log training metrics at specified intervals + if optimization_step % log_freq == 0: + training_infos["replay_buffer_size"] = len(replay_buffer) + if offline_replay_buffer is not None: + training_infos["offline_replay_buffer_size"] = len(offline_replay_buffer) + training_infos["Optimization step"] = optimization_step + + # Log training metrics + if wandb_logger: + wandb_logger.log_dict(d=training_infos, mode="train", custom_step_key="Optimization step") + + # Calculate and log optimization frequency + time_for_one_optimization_step = time.time() - time_for_one_optimization_step + frequency_for_one_optimization_step = 1 / (time_for_one_optimization_step + 1e-9) + + logging.info(f"[LEARNER] Optimization frequency loop [Hz]: {frequency_for_one_optimization_step}") + + # Log optimization frequency + if wandb_logger: + wandb_logger.log_dict( + { + "Optimization frequency loop [Hz]": frequency_for_one_optimization_step, + "Optimization step": optimization_step, + }, + mode="train", + custom_step_key="Optimization step", + ) + + optimization_step += 1 + if optimization_step % log_freq == 0: + logging.info(f"[LEARNER] Number of optimization step: {optimization_step}") + + # Save checkpoint at specified intervals + if saving_checkpoint and (optimization_step % save_freq == 0 or optimization_step == online_steps): + save_training_checkpoint( + cfg=cfg, + optimization_step=optimization_step, + online_steps=online_steps, + interaction_message=interaction_message, + policy=policy, + optimizers=optimizers, + replay_buffer=replay_buffer, + offline_replay_buffer=offline_replay_buffer, + dataset_repo_id=dataset_repo_id, + fps=fps, + ) + + +def start_learner( + parameters_queue: Queue, + transition_queue: Queue, + interaction_message_queue: Queue, + shutdown_event: any, # Event, + cfg: TrainRLServerPipelineConfig, +): + """ + Start the learner server for training. + It will receive transitions and interaction messages from the actor server, + and send policy parameters to the actor server. 
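+
+    Example (illustrative sketch; assumes `cfg` is an already-parsed TrainRLServerPipelineConfig and
+    that the caller owns the queues and the shutdown event):
+
+        from multiprocessing import Event, Queue
+
+        parameters_queue, transition_queue, interaction_message_queue = Queue(), Queue(), Queue()
+        shutdown_event = Event()
+        start_learner(parameters_queue, transition_queue, interaction_message_queue, shutdown_event, cfg)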
+ + Args: + parameters_queue: Queue for sending policy parameters to the actor + transition_queue: Queue for receiving transitions from the actor + interaction_message_queue: Queue for receiving interaction messages from the actor + shutdown_event: Event to signal shutdown + cfg: Training configuration + """ + if not use_threads(cfg): + # Create a process-specific log file + log_dir = os.path.join(cfg.output_dir, "logs") + os.makedirs(log_dir, exist_ok=True) + log_file = os.path.join(log_dir, f"learner_process_{os.getpid()}.log") + + # Initialize logging with explicit log file + init_logging(log_file=log_file, display_pid=True) + logging.info("Learner server process logging initialized") + + # Setup process handlers to handle shutdown signal + # But use shutdown event from the main process + # Return back for MP + # TODO: Check if its useful + _ = ProcessSignalHandler(False, display_pid=True) + + service = learner_service.LearnerService( + shutdown_event=shutdown_event, + parameters_queue=parameters_queue, + seconds_between_pushes=cfg.policy.actor_learner_config.policy_parameters_push_frequency, + transition_queue=transition_queue, + interaction_message_queue=interaction_message_queue, + queue_get_timeout=cfg.policy.actor_learner_config.queue_get_timeout, + ) + + server = grpc.server( + ThreadPoolExecutor(max_workers=learner_service.MAX_WORKERS), + options=[ + ("grpc.max_receive_message_length", learner_service.MAX_MESSAGE_SIZE), + ("grpc.max_send_message_length", learner_service.MAX_MESSAGE_SIZE), + ], + ) + + services_pb2_grpc.add_LearnerServiceServicer_to_server( + service, + server, + ) + + host = cfg.policy.actor_learner_config.learner_host + port = cfg.policy.actor_learner_config.learner_port + + server.add_insecure_port(f"{host}:{port}") + server.start() + logging.info("[LEARNER] gRPC server started") + + shutdown_event.wait() + logging.info("[LEARNER] Stopping gRPC server...") + server.stop(learner_service.SHUTDOWN_TIMEOUT) + logging.info("[LEARNER] gRPC server stopped") + + +def save_training_checkpoint( + cfg: TrainRLServerPipelineConfig, + optimization_step: int, + online_steps: int, + interaction_message: dict | None, + policy: nn.Module, + optimizers: dict[str, Optimizer], + replay_buffer: ReplayBuffer, + offline_replay_buffer: ReplayBuffer | None = None, + dataset_repo_id: str | None = None, + fps: int = 30, +) -> None: + """ + Save training checkpoint and associated data. + + This function performs the following steps: + 1. Creates a checkpoint directory with the current optimization step + 2. Saves the policy model, configuration, and optimizer states + 3. Saves the current interaction step for resuming training + 4. Updates the "last" checkpoint symlink to point to this checkpoint + 5. Saves the replay buffer as a dataset for later use + 6. 
If an offline replay buffer exists, saves it as a separate dataset + + Args: + cfg: Training configuration + optimization_step: Current optimization step + online_steps: Total number of online steps + interaction_message: Dictionary containing interaction information + policy: Policy model to save + optimizers: Dictionary of optimizers + replay_buffer: Replay buffer to save as dataset + offline_replay_buffer: Optional offline replay buffer to save + dataset_repo_id: Repository ID for dataset + fps: Frames per second for dataset + """ + logging.info(f"Checkpoint policy after step {optimization_step}") + _num_digits = max(6, len(str(online_steps))) + interaction_step = interaction_message["Interaction step"] if interaction_message is not None else 0 + + # Create checkpoint directory + checkpoint_dir = get_step_checkpoint_dir(cfg.output_dir, online_steps, optimization_step) + + # Save checkpoint + save_checkpoint( + checkpoint_dir=checkpoint_dir, + step=optimization_step, + cfg=cfg, + policy=policy, + optimizer=optimizers, + scheduler=None, + ) + + # Save interaction step manually + training_state_dir = os.path.join(checkpoint_dir, TRAINING_STATE_DIR) + os.makedirs(training_state_dir, exist_ok=True) + training_state = {"step": optimization_step, "interaction_step": interaction_step} + torch.save(training_state, os.path.join(training_state_dir, "training_state.pt")) + + # Update the "last" symlink + update_last_checkpoint(checkpoint_dir) + + # TODO : temporary save replay buffer here, remove later when on the robot + # We want to control this with the keyboard inputs + dataset_dir = os.path.join(cfg.output_dir, "dataset") + if os.path.exists(dataset_dir) and os.path.isdir(dataset_dir): + shutil.rmtree(dataset_dir) + + # Save dataset + # NOTE: Handle the case where the dataset repo id is not specified in the config + # eg. RL training without demonstrations data + repo_id_buffer_save = cfg.env.task if dataset_repo_id is None else dataset_repo_id + replay_buffer.to_lerobot_dataset(repo_id=repo_id_buffer_save, fps=fps, root=dataset_dir) + + if offline_replay_buffer is not None: + dataset_offline_dir = os.path.join(cfg.output_dir, "dataset_offline") + if os.path.exists(dataset_offline_dir) and os.path.isdir(dataset_offline_dir): + shutil.rmtree(dataset_offline_dir) + + offline_replay_buffer.to_lerobot_dataset( + cfg.dataset.repo_id, + fps=fps, + root=dataset_offline_dir, + ) + + logging.info("Resume training") + + +def make_optimizers_and_scheduler(cfg: TrainRLServerPipelineConfig, policy: nn.Module): + """ + Creates and returns optimizers for the actor, critic, and temperature components of a reinforcement learning policy. + + This function sets up Adam optimizers for: + - The **actor network**, ensuring that only relevant parameters are optimized. + - The **critic ensemble**, which evaluates the value function. + - The **temperature parameter**, which controls the entropy in soft actor-critic (SAC)-like methods. + + It also initializes a learning rate scheduler, though currently, it is set to `None`. + + NOTE: + - If the encoder is shared, its parameters are excluded from the actor's optimization process. + - The policy's log temperature (`log_alpha`) is wrapped in a list to ensure proper optimization as a standalone tensor. + + Args: + cfg: Configuration object containing hyperparameters. + policy (nn.Module): The policy model containing the actor, critic, and temperature components. 
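+
+        Example (illustrative; mirrors how the learner loop consumes the result):
+
+            optimizers, lr_scheduler = make_optimizers_and_scheduler(cfg=cfg, policy=policy)
+            # `optimizers` keys: "actor", "critic", "temperature", and "discrete_critic" when
+            # cfg.policy.num_discrete_actions is not None; `lr_scheduler` is currently None.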
+ + Returns: + Tuple[Dict[str, torch.optim.Optimizer], Optional[torch.optim.lr_scheduler._LRScheduler]]: + A tuple containing: + - `optimizers`: A dictionary mapping component names ("actor", "critic", "temperature") to their respective Adam optimizers. + - `lr_scheduler`: Currently set to `None` but can be extended to support learning rate scheduling. + + """ + optimizer_actor = torch.optim.Adam( + params=[ + p + for n, p in policy.actor.named_parameters() + if not policy.config.shared_encoder or not n.startswith("encoder") + ], + lr=cfg.policy.actor_lr, + ) + optimizer_critic = torch.optim.Adam(params=policy.critic_ensemble.parameters(), lr=cfg.policy.critic_lr) + + if cfg.policy.num_discrete_actions is not None: + optimizer_discrete_critic = torch.optim.Adam( + params=policy.discrete_critic.parameters(), lr=cfg.policy.critic_lr + ) + optimizer_temperature = torch.optim.Adam(params=[policy.log_alpha], lr=cfg.policy.critic_lr) + lr_scheduler = None + optimizers = { + "actor": optimizer_actor, + "critic": optimizer_critic, + "temperature": optimizer_temperature, + } + if cfg.policy.num_discrete_actions is not None: + optimizers["discrete_critic"] = optimizer_discrete_critic + return optimizers, lr_scheduler + + +################################################# +# Training setup functions # +################################################# + + +def handle_resume_logic(cfg: TrainRLServerPipelineConfig) -> TrainRLServerPipelineConfig: + """ + Handle the resume logic for training. + + If resume is True: + - Verifies that a checkpoint exists + - Loads the checkpoint configuration + - Logs resumption details + - Returns the checkpoint configuration + + If resume is False: + - Checks if an output directory exists (to prevent accidental overwriting) + - Returns the original configuration + + Args: + cfg (TrainRLServerPipelineConfig): The training configuration + + Returns: + TrainRLServerPipelineConfig: The updated configuration + + Raises: + RuntimeError: If resume is True but no checkpoint found, or if resume is False but directory exists + """ + out_dir = cfg.output_dir + + # Case 1: Not resuming, but need to check if directory exists to prevent overwrites + if not cfg.resume: + checkpoint_dir = os.path.join(out_dir, CHECKPOINTS_DIR, LAST_CHECKPOINT_LINK) + if os.path.exists(checkpoint_dir): + raise RuntimeError( + f"Output directory {checkpoint_dir} already exists. Use `resume=true` to resume training." + ) + return cfg + + # Case 2: Resuming training + checkpoint_dir = os.path.join(out_dir, CHECKPOINTS_DIR, LAST_CHECKPOINT_LINK) + if not os.path.exists(checkpoint_dir): + raise RuntimeError(f"No model checkpoint found in {checkpoint_dir} for resume=True") + + # Log that we found a valid checkpoint and are resuming + logging.info( + colored( + "Valid checkpoint found: resume=True detected, resuming previous run", + color="yellow", + attrs=["bold"], + ) + ) + + # Load config using Draccus + checkpoint_cfg_path = os.path.join(checkpoint_dir, PRETRAINED_MODEL_DIR, "train_config.json") + checkpoint_cfg = TrainRLServerPipelineConfig.from_pretrained(checkpoint_cfg_path) + + # Ensure resume flag is set in returned config + checkpoint_cfg.resume = True + return checkpoint_cfg + + +def load_training_state( + cfg: TrainRLServerPipelineConfig, + optimizers: Optimizer | dict[str, Optimizer], +): + """ + Loads the training state (optimizers, step count, etc.) from a checkpoint. 
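+
+    Example (illustrative; mirrors the call made by the learner before its training loop):
+
+        resume_optimization_step, resume_interaction_step = load_training_state(cfg=cfg, optimizers=optimizers)
+        optimization_step = resume_optimization_step if resume_optimization_step is not None else 0
+        interaction_step_shift = resume_interaction_step if resume_interaction_step is not None else 0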
+ + Args: + cfg (TrainRLServerPipelineConfig): Training configuration + optimizers (Optimizer | dict): Optimizers to load state into + + Returns: + tuple: (optimization_step, interaction_step) or (None, None) if not resuming + """ + if not cfg.resume: + return None, None + + # Construct path to the last checkpoint directory + checkpoint_dir = os.path.join(cfg.output_dir, CHECKPOINTS_DIR, LAST_CHECKPOINT_LINK) + + logging.info(f"Loading training state from {checkpoint_dir}") + + try: + # Use the utility function from train_utils which loads the optimizer state + step, optimizers, _ = utils_load_training_state(Path(checkpoint_dir), optimizers, None) + + # Load interaction step separately from training_state.pt + training_state_path = os.path.join(checkpoint_dir, TRAINING_STATE_DIR, "training_state.pt") + interaction_step = 0 + if os.path.exists(training_state_path): + training_state = torch.load(training_state_path, weights_only=False) # nosec B614: Safe usage of torch.load + interaction_step = training_state.get("interaction_step", 0) + + logging.info(f"Resuming from step {step}, interaction step {interaction_step}") + return step, interaction_step + + except Exception as e: + logging.error(f"Failed to load training state: {e}") + return None, None + + +def log_training_info(cfg: TrainRLServerPipelineConfig, policy: nn.Module) -> None: + """ + Log information about the training process. + + Args: + cfg (TrainRLServerPipelineConfig): Training configuration + policy (nn.Module): Policy model + """ + num_learnable_params = sum(p.numel() for p in policy.parameters() if p.requires_grad) + num_total_params = sum(p.numel() for p in policy.parameters()) + + logging.info(colored("Output dir:", "yellow", attrs=["bold"]) + f" {cfg.output_dir}") + logging.info(f"{cfg.env.task=}") + logging.info(f"{cfg.policy.online_steps=}") + logging.info(f"{num_learnable_params=} ({format_big_number(num_learnable_params)})") + logging.info(f"{num_total_params=} ({format_big_number(num_total_params)})") + + +def initialize_replay_buffer( + cfg: TrainRLServerPipelineConfig, device: str, storage_device: str +) -> ReplayBuffer: + """ + Initialize a replay buffer, either empty or from a dataset if resuming. + + Args: + cfg (TrainRLServerPipelineConfig): Training configuration + device (str): Device to store tensors on + storage_device (str): Device for storage optimization + + Returns: + ReplayBuffer: Initialized replay buffer + """ + if not cfg.resume: + return ReplayBuffer( + capacity=cfg.policy.online_buffer_capacity, + device=device, + state_keys=cfg.policy.input_features.keys(), + storage_device=storage_device, + optimize_memory=True, + ) + + logging.info("Resume training load the online dataset") + dataset_path = os.path.join(cfg.output_dir, "dataset") + + # NOTE: In RL is possible to not have a dataset. + repo_id = None + if cfg.dataset is not None: + repo_id = cfg.dataset.repo_id + dataset = LeRobotDataset( + repo_id=repo_id, + root=dataset_path, + ) + return ReplayBuffer.from_lerobot_dataset( + lerobot_dataset=dataset, + capacity=cfg.policy.online_buffer_capacity, + device=device, + state_keys=cfg.policy.input_features.keys(), + optimize_memory=True, + ) + + +def initialize_offline_replay_buffer( + cfg: TrainRLServerPipelineConfig, + device: str, + storage_device: str, +) -> ReplayBuffer: + """ + Initialize an offline replay buffer from a dataset. 
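+
+    Example (illustrative; mirrors how the learner samples from the returned buffer):
+
+        offline_replay_buffer = initialize_offline_replay_buffer(cfg=cfg, device="cpu", storage_device="cpu")
+        offline_iterator = offline_replay_buffer.get_iterator(batch_size=64, async_prefetch=False, queue_size=2)
+        batch_offline = next(offline_iterator)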
+ + Args: + cfg (TrainRLServerPipelineConfig): Training configuration + device (str): Device to store tensors on + storage_device (str): Device for storage optimization + + Returns: + ReplayBuffer: Initialized offline replay buffer + """ + if not cfg.resume: + logging.info("Creating offline dataset with make_dataset") + offline_dataset = make_dataset(cfg) + else: + logging.info("Loading offline dataset") + dataset_offline_path = os.path.join(cfg.output_dir, "dataset_offline") + offline_dataset = LeRobotDataset( + repo_id=cfg.dataset.repo_id, + root=dataset_offline_path, + ) + + logging.info("Converting to an offline replay buffer") + offline_replay_buffer = ReplayBuffer.from_lerobot_dataset( + offline_dataset, + device=device, + state_keys=cfg.policy.input_features.keys(), + storage_device=storage_device, + optimize_memory=True, + capacity=cfg.policy.offline_buffer_capacity, + ) + return offline_replay_buffer + + + ################################################# + # Utilities/Helpers functions # + ################################################# + + + def get_observation_features( + policy: SACPolicy, observations: torch.Tensor, next_observations: torch.Tensor + ) -> tuple[torch.Tensor | None, torch.Tensor | None]: + """ + Get observation features from the policy encoder. This acts as a cache for the observation features: + when the encoder is frozen, the observation features do not change, + so we can save compute by caching them. + + Args: + policy: The policy model + observations: The current observations + next_observations: The next observations + + Returns: + tuple: observation_features, next_observation_features + """ + + if policy.config.vision_encoder_name is None or not policy.config.freeze_vision_encoder: + return None, None + + with torch.no_grad(): + observation_features = policy.actor.encoder.get_cached_image_features(observations, normalize=True) + next_observation_features = policy.actor.encoder.get_cached_image_features( + next_observations, normalize=True + ) + + return observation_features, next_observation_features + + + def use_threads(cfg: TrainRLServerPipelineConfig) -> bool: + return cfg.policy.concurrency.learner == "threads" + + + def check_nan_in_transition( + observations: torch.Tensor, + actions: torch.Tensor, + next_state: torch.Tensor, + raise_error: bool = False, +) -> bool: + """ + Check for NaN values in transition data.
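+
+    Example (illustrative sketch with toy tensors; `observations` and `next_state` are dictionaries of tensors):
+
+        obs = {"observation.state": torch.tensor([0.0, float("nan")])}
+        act = torch.zeros(2)
+        check_nan_in_transition(observations=obs, actions=act, next_state=obs)  # -> True, and logs an error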
+ + Args: + observations: Dictionary of observation tensors + actions: Action tensor + next_state: Dictionary of next state tensors + raise_error: If True, raises ValueError when NaN is detected + + Returns: + bool: True if NaN values were detected, False otherwise + """ + nan_detected = False + + # Check observations + for key, tensor in observations.items(): + if torch.isnan(tensor).any(): + logging.error(f"observations[{key}] contains NaN values") + nan_detected = True + if raise_error: + raise ValueError(f"NaN detected in observations[{key}]") + + # Check next state + for key, tensor in next_state.items(): + if torch.isnan(tensor).any(): + logging.error(f"next_state[{key}] contains NaN values") + nan_detected = True + if raise_error: + raise ValueError(f"NaN detected in next_state[{key}]") + + # Check actions + if torch.isnan(actions).any(): + logging.error("actions contains NaN values") + nan_detected = True + if raise_error: + raise ValueError("NaN detected in actions") + + return nan_detected + + +def push_actor_policy_to_queue(parameters_queue: Queue, policy: nn.Module): + logging.debug("[LEARNER] Pushing actor policy to the queue") + state_dict = move_state_dict_to_device(policy.actor.state_dict(), device="cpu") + state_bytes = state_to_bytes(state_dict) + parameters_queue.put(state_bytes) + + +def process_interaction_message( + message, interaction_step_shift: int, wandb_logger: WandBLogger | None = None +): + """Process a single interaction message with consistent handling.""" + message = bytes_to_python_object(message) + # Shift interaction step for consistency with checkpointed state + message["Interaction step"] += interaction_step_shift + + # Log if logger available + if wandb_logger: + wandb_logger.log_dict(d=message, mode="train", custom_step_key="Interaction step") + + return message + + +def process_transitions( + transition_queue: Queue, + replay_buffer: ReplayBuffer, + offline_replay_buffer: ReplayBuffer, + device: str, + dataset_repo_id: str | None, + shutdown_event: any, +): + """Process all available transitions from the queue. + + Args: + transition_queue: Queue for receiving transitions from the actor + replay_buffer: Replay buffer to add transitions to + offline_replay_buffer: Offline replay buffer to add transitions to + device: Device to move transitions to + dataset_repo_id: Repository ID for dataset + shutdown_event: Event to signal shutdown + """ + while not transition_queue.empty() and not shutdown_event.is_set(): + transition_list = transition_queue.get() + transition_list = bytes_to_transitions(buffer=transition_list) + + for transition in transition_list: + transition = move_transition_to_device(transition=transition, device=device) + + # Skip transitions with NaN values + if check_nan_in_transition( + observations=transition["state"], + actions=transition["action"], + next_state=transition["next_state"], + ): + logging.warning("[LEARNER] NaN detected in transition, skipping") + continue + + replay_buffer.add(**transition) + + # Add to offline buffer if it's an intervention + if dataset_repo_id is not None and transition.get("complementary_info", {}).get( + "is_intervention" + ): + offline_replay_buffer.add(**transition) + + +def process_interaction_messages( + interaction_message_queue: Queue, + interaction_step_shift: int, + wandb_logger: WandBLogger | None, + shutdown_event: any, +) -> dict | None: + """Process all available interaction messages from the queue. 
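+
+    Example (illustrative; mirrors the learner loop, which calls this once per iteration to drain the queue):
+
+        interaction_message = process_interaction_messages(
+            interaction_message_queue=interaction_message_queue,
+            interaction_step_shift=interaction_step_shift,
+            wandb_logger=wandb_logger,
+            shutdown_event=shutdown_event,
+        )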
+ + Args: + interaction_message_queue: Queue for receiving interaction messages + interaction_step_shift: Amount to shift interaction step by + wandb_logger: Logger for tracking progress + shutdown_event: Event to signal shutdown + + Returns: + dict | None: The last interaction message processed, or None if none were processed + """ + last_message = None + while not interaction_message_queue.empty() and not shutdown_event.is_set(): + message = interaction_message_queue.get() + last_message = process_interaction_message( + message=message, + interaction_step_shift=interaction_step_shift, + wandb_logger=wandb_logger, + ) + + return last_message + + +if __name__ == "__main__": + train_cli() + logging.info("[LEARNER] main finished") diff --git a/lerobot/scripts/rl/learner_service.py b/lerobot/scripts/rl/learner_service.py new file mode 100644 index 0000000000..f967d812cf --- /dev/null +++ b/lerobot/scripts/rl/learner_service.py @@ -0,0 +1,118 @@ +# !/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import time +from multiprocessing import Event, Queue + +from lerobot.common.transport import services_pb2, services_pb2_grpc +from lerobot.common.transport.utils import receive_bytes_in_chunks, send_bytes_in_chunks +from lerobot.common.utils.queue import get_last_item_from_queue + +MAX_MESSAGE_SIZE = 4 * 1024 * 1024 # 4 MB +MAX_WORKERS = 3 # Stream parameters, send transitions and interactions +SHUTDOWN_TIMEOUT = 10 + + +class LearnerService(services_pb2_grpc.LearnerServiceServicer): + """ + Implementation of the LearnerService gRPC service + This service is used to send parameters to the Actor and receive transitions and interactions from the Actor + check transport.proto for the gRPC service definition + """ + + def __init__( + self, + shutdown_event: Event, # type: ignore + parameters_queue: Queue, + seconds_between_pushes: float, + transition_queue: Queue, + interaction_message_queue: Queue, + queue_get_timeout: float = 0.001, + ): + self.shutdown_event = shutdown_event + self.parameters_queue = parameters_queue + self.seconds_between_pushes = seconds_between_pushes + self.transition_queue = transition_queue + self.interaction_message_queue = interaction_message_queue + self.queue_get_timeout = queue_get_timeout + + def StreamParameters(self, request, context): # noqa: N802 + # TODO: authorize the request + logging.info("[LEARNER] Received request to stream parameters from the Actor") + + last_push_time = 0 + + while not self.shutdown_event.is_set(): + time_since_last_push = time.time() - last_push_time + if time_since_last_push < self.seconds_between_pushes: + self.shutdown_event.wait(self.seconds_between_pushes - time_since_last_push) + # Continue, because we could receive a shutdown event, + # and it's checked in the while loop + continue + + logging.info("[LEARNER] Push parameters to the Actor") + buffer = get_last_item_from_queue( + self.parameters_queue, block=True, 
timeout=self.queue_get_timeout + ) + + if buffer is None: + continue + + yield from send_bytes_in_chunks( + buffer, + services_pb2.Parameters, + log_prefix="[LEARNER] Sending parameters", + silent=True, + ) + + last_push_time = time.time() + logging.info("[LEARNER] Parameters sent") + + logging.info("[LEARNER] Stream parameters finished") + return services_pb2.Empty() + + def SendTransitions(self, request_iterator, _context): # noqa: N802 + # TODO: authorize the request + logging.info("[LEARNER] Received request to receive transitions from the Actor") + + receive_bytes_in_chunks( + request_iterator, + self.transition_queue, + self.shutdown_event, + log_prefix="[LEARNER] transitions", + ) + + logging.debug("[LEARNER] Finished receiving transitions") + return services_pb2.Empty() + + def SendInteractions(self, request_iterator, _context): # noqa: N802 + # TODO: authorize the request + logging.info("[LEARNER] Received request to receive interactions from the Actor") + + receive_bytes_in_chunks( + request_iterator, + self.interaction_message_queue, + self.shutdown_event, + log_prefix="[LEARNER] interactions", + ) + + logging.debug("[LEARNER] Finished receiving interactions") + return services_pb2.Empty() + + def Ready(self, request, context): # noqa: N802 + return services_pb2.Empty() diff --git a/lerobot/teleoperate.py b/lerobot/teleoperate.py index 97e6104301..6080dfb403 100644 --- a/lerobot/teleoperate.py +++ b/lerobot/teleoperate.py @@ -58,7 +58,7 @@ from lerobot.common.utils.utils import init_logging, move_cursor_up from lerobot.common.utils.visualization_utils import _init_rerun -from .common.teleoperators import koch_leader, so100_leader, so101_leader # noqa: F401 +from .common.teleoperators import gamepad, koch_leader, so100_leader, so101_leader # noqa: F401 @dataclass diff --git a/pyproject.toml b/pyproject.toml index a99b1b16c4..1ebef75bff 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -68,6 +68,7 @@ dependencies = [ "pyserial>=3.5", "pyzmq>=26.2.1", "rerun-sdk>=0.21.0", + "scipy>=1.14.0", "termcolor>=2.4.0", "torch>=2.2.1", "torchcodec>=0.2.1; sys_platform != 'win32' and (sys_platform != 'linux' or (platform_machine != 'aarch64' and platform_machine != 'arm64' and platform_machine != 'armv7l')) and (sys_platform != 'darwin' or platform_machine != 'x86_64')", @@ -97,7 +98,8 @@ stretch = [ "pyrender @ git+https://github.com/mmatl/pyrender.git ; sys_platform == 'linux'", "pyrealsense2>=2.55.1.6486 ; sys_platform != 'darwin'" ] -test = ["pytest>=8.1.0", "pytest-cov>=5.0.0", "mock-serial>=0.0.1 ; sys_platform != 'win32'"] +test = ["pytest>=8.1.0", "pytest-timeout>=2.4.0", "pytest-cov>=5.0.0", "pyserial>=3.5", "mock-serial>=0.0.1 ; sys_platform != 'win32'"] +hilserl = ["transformers>=4.48", "gym-hil>=0.1.8", "protobuf>=5.29.3", "grpcio==1.71.0"] umi = ["imagecodecs>=2024.1.1"] video_benchmark = ["scikit-image>=0.23.2", "pandas>=2.2.2"] xarm = ["gym-xarm>=0.1.1 ; python_version < '4.0'"] @@ -108,7 +110,7 @@ requires-poetry = ">=2.1" [tool.ruff] line-length = 110 target-version = "py310" -exclude = ["tests/artifacts/**/*.safetensors"] +exclude = ["tests/artifacts/**/*.safetensors", "*_pb2.py", "*_pb2_grpc.py"] [tool.ruff.lint] select = ["E4", "E7", "E9", "F", "I", "N", "B", "C4", "SIM"] diff --git a/tests/optim/test_optimizers.py b/tests/optim/test_optimizers.py index 997e14fe94..630353fcaf 100644 --- a/tests/optim/test_optimizers.py +++ b/tests/optim/test_optimizers.py @@ -21,6 +21,7 @@ from lerobot.common.optim.optimizers import ( AdamConfig, AdamWConfig, + MultiAdamConfig, 
SGDConfig, load_optimizer_state, save_optimizer_state, @@ -33,13 +34,21 @@ (AdamConfig, torch.optim.Adam), (AdamWConfig, torch.optim.AdamW), (SGDConfig, torch.optim.SGD), + (MultiAdamConfig, dict), ], ) def test_optimizer_build(config_cls, expected_class, model_params): config = config_cls() - optimizer = config.build(model_params) - assert isinstance(optimizer, expected_class) - assert optimizer.defaults["lr"] == config.lr + if config_cls == MultiAdamConfig: + params_dict = {"default": model_params} + optimizer = config.build(params_dict) + assert isinstance(optimizer, expected_class) + assert isinstance(optimizer["default"], torch.optim.Adam) + assert optimizer["default"].defaults["lr"] == config.lr + else: + optimizer = config.build(model_params) + assert isinstance(optimizer, expected_class) + assert optimizer.defaults["lr"] == config.lr def test_save_optimizer_state(optimizer, tmp_path): @@ -54,3 +63,180 @@ def test_save_and_load_optimizer_state(model_params, optimizer, tmp_path): loaded_optimizer = load_optimizer_state(loaded_optimizer, tmp_path) torch.testing.assert_close(optimizer.state_dict(), loaded_optimizer.state_dict()) + + +@pytest.fixture +def base_params_dict(): + return { + "actor": [torch.nn.Parameter(torch.randn(10, 10))], + "critic": [torch.nn.Parameter(torch.randn(5, 5))], + "temperature": [torch.nn.Parameter(torch.randn(3, 3))], + } + + +@pytest.mark.parametrize( + "config_params, expected_values", + [ + # Test 1: Basic configuration with different learning rates + ( + { + "lr": 1e-3, + "weight_decay": 1e-4, + "optimizer_groups": { + "actor": {"lr": 1e-4}, + "critic": {"lr": 5e-4}, + "temperature": {"lr": 2e-3}, + }, + }, + { + "actor": {"lr": 1e-4, "weight_decay": 1e-4, "betas": (0.9, 0.999)}, + "critic": {"lr": 5e-4, "weight_decay": 1e-4, "betas": (0.9, 0.999)}, + "temperature": {"lr": 2e-3, "weight_decay": 1e-4, "betas": (0.9, 0.999)}, + }, + ), + # Test 2: Different weight decays and beta values + ( + { + "lr": 1e-3, + "weight_decay": 1e-4, + "optimizer_groups": { + "actor": {"lr": 1e-4, "weight_decay": 1e-5}, + "critic": {"lr": 5e-4, "weight_decay": 1e-6}, + "temperature": {"lr": 2e-3, "betas": (0.95, 0.999)}, + }, + }, + { + "actor": {"lr": 1e-4, "weight_decay": 1e-5, "betas": (0.9, 0.999)}, + "critic": {"lr": 5e-4, "weight_decay": 1e-6, "betas": (0.9, 0.999)}, + "temperature": {"lr": 2e-3, "weight_decay": 1e-4, "betas": (0.95, 0.999)}, + }, + ), + # Test 3: Epsilon parameter customization + ( + { + "lr": 1e-3, + "weight_decay": 1e-4, + "optimizer_groups": { + "actor": {"lr": 1e-4, "eps": 1e-6}, + "critic": {"lr": 5e-4, "eps": 1e-7}, + "temperature": {"lr": 2e-3, "eps": 1e-8}, + }, + }, + { + "actor": {"lr": 1e-4, "weight_decay": 1e-4, "betas": (0.9, 0.999), "eps": 1e-6}, + "critic": {"lr": 5e-4, "weight_decay": 1e-4, "betas": (0.9, 0.999), "eps": 1e-7}, + "temperature": {"lr": 2e-3, "weight_decay": 1e-4, "betas": (0.9, 0.999), "eps": 1e-8}, + }, + ), + ], +) +def test_multi_adam_configuration(base_params_dict, config_params, expected_values): + # Create config with the given parameters + config = MultiAdamConfig(**config_params) + optimizers = config.build(base_params_dict) + + # Verify optimizer count and keys + assert len(optimizers) == len(expected_values) + assert set(optimizers.keys()) == set(expected_values.keys()) + + # Check that all optimizers are Adam instances + for opt in optimizers.values(): + assert isinstance(opt, torch.optim.Adam) + + # Verify hyperparameters for each optimizer + for name, expected in expected_values.items(): + optimizer = 
optimizers[name] + for param, value in expected.items(): + assert optimizer.defaults[param] == value + + +@pytest.fixture +def multi_optimizers(base_params_dict): + config = MultiAdamConfig( + lr=1e-3, + optimizer_groups={ + "actor": {"lr": 1e-4}, + "critic": {"lr": 5e-4}, + "temperature": {"lr": 2e-3}, + }, + ) + return config.build(base_params_dict) + + +def test_save_multi_optimizer_state(multi_optimizers, tmp_path): + # Save optimizer states + save_optimizer_state(multi_optimizers, tmp_path) + + # Verify that directories were created for each optimizer + for name in multi_optimizers: + assert (tmp_path / name).is_dir() + assert (tmp_path / name / OPTIMIZER_STATE).is_file() + assert (tmp_path / name / OPTIMIZER_PARAM_GROUPS).is_file() + + +def test_save_and_load_multi_optimizer_state(base_params_dict, multi_optimizers, tmp_path): + # Option 1: Add a minimal backward pass to populate optimizer states + for name, params in base_params_dict.items(): + if name in multi_optimizers: + # Create a dummy loss and do backward + dummy_loss = params[0].sum() + dummy_loss.backward() + # Perform an optimization step + multi_optimizers[name].step() + # Zero gradients for next steps + multi_optimizers[name].zero_grad() + + # Save optimizer states + save_optimizer_state(multi_optimizers, tmp_path) + + # Create new optimizers with the same config + config = MultiAdamConfig( + lr=1e-3, + optimizer_groups={ + "actor": {"lr": 1e-4}, + "critic": {"lr": 5e-4}, + "temperature": {"lr": 2e-3}, + }, + ) + new_optimizers = config.build(base_params_dict) + + # Load optimizer states + loaded_optimizers = load_optimizer_state(new_optimizers, tmp_path) + + # Verify state dictionaries match + for name in multi_optimizers: + torch.testing.assert_close(multi_optimizers[name].state_dict(), loaded_optimizers[name].state_dict()) + + +def test_save_and_load_empty_multi_optimizer_state(base_params_dict, tmp_path): + """Test saving and loading optimizer states even when the state is empty (no backward pass).""" + # Create config and build optimizers + config = MultiAdamConfig( + lr=1e-3, + optimizer_groups={ + "actor": {"lr": 1e-4}, + "critic": {"lr": 5e-4}, + "temperature": {"lr": 2e-3}, + }, + ) + optimizers = config.build(base_params_dict) + + # Save optimizer states without any backward pass (empty state) + save_optimizer_state(optimizers, tmp_path) + + # Create new optimizers with the same config + new_optimizers = config.build(base_params_dict) + + # Load optimizer states + loaded_optimizers = load_optimizer_state(new_optimizers, tmp_path) + + # Verify hyperparameters match even with empty state + for name, optimizer in optimizers.items(): + assert optimizer.defaults["lr"] == loaded_optimizers[name].defaults["lr"] + assert optimizer.defaults["weight_decay"] == loaded_optimizers[name].defaults["weight_decay"] + assert optimizer.defaults["betas"] == loaded_optimizers[name].defaults["betas"] + + # Verify state dictionaries match (they will be empty) + torch.testing.assert_close( + optimizer.state_dict()["param_groups"], loaded_optimizers[name].state_dict()["param_groups"] + ) diff --git a/tests/policies/hilserl/test_modeling_classifier.py b/tests/policies/hilserl/test_modeling_classifier.py new file mode 100644 index 0000000000..526e1f17dd --- /dev/null +++ b/tests/policies/hilserl/test_modeling_classifier.py @@ -0,0 +1,139 @@ +# !/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import torch + +from lerobot.common.policies.sac.reward_model.configuration_classifier import RewardClassifierConfig +from lerobot.common.policies.sac.reward_model.modeling_classifier import ClassifierOutput +from lerobot.configs.types import FeatureType, NormalizationMode, PolicyFeature +from tests.utils import require_package + + +def test_classifier_output(): + output = ClassifierOutput( + logits=torch.tensor([1, 2, 3]), + probabilities=torch.tensor([0.1, 0.2, 0.3]), + hidden_states=None, + ) + + assert ( + f"{output}" + == "ClassifierOutput(logits=tensor([1, 2, 3]), probabilities=tensor([0.1000, 0.2000, 0.3000]), hidden_states=None)" + ) + + +@require_package("transformers") +def test_binary_classifier_with_default_params(): + from lerobot.common.policies.sac.reward_model.modeling_classifier import Classifier + + config = RewardClassifierConfig() + config.input_features = { + "observation.image": PolicyFeature(type=FeatureType.VISUAL, shape=(3, 224, 224)), + } + config.output_features = { + "next.reward": PolicyFeature(type=FeatureType.REWARD, shape=(1,)), + } + config.normalization_mapping = { + "VISUAL": NormalizationMode.IDENTITY, + "REWARD": NormalizationMode.IDENTITY, + } + config.num_cameras = 1 + classifier = Classifier(config) + + batch_size = 10 + + input = { + "observation.image": torch.rand((batch_size, 3, 128, 128)), + "next.reward": torch.randint(low=0, high=2, size=(batch_size,)).float(), + } + + images, labels = classifier.extract_images_and_labels(input) + assert len(images) == 1 + assert images[0].shape == torch.Size([batch_size, 3, 128, 128]) + assert labels.shape == torch.Size([batch_size]) + + output = classifier.predict(images) + + assert output is not None + assert output.logits.size() == torch.Size([batch_size]) + assert not torch.isnan(output.logits).any(), "Tensor contains NaN values" + assert output.probabilities.shape == torch.Size([batch_size]) + assert not torch.isnan(output.probabilities).any(), "Tensor contains NaN values" + assert output.hidden_states.shape == torch.Size([batch_size, 256]) + assert not torch.isnan(output.hidden_states).any(), "Tensor contains NaN values" + + +@require_package("transformers") +def test_multiclass_classifier(): + from lerobot.common.policies.sac.reward_model.modeling_classifier import Classifier + + num_classes = 5 + config = RewardClassifierConfig() + config.input_features = { + "observation.image": PolicyFeature(type=FeatureType.VISUAL, shape=(3, 224, 224)), + } + config.output_features = { + "next.reward": PolicyFeature(type=FeatureType.REWARD, shape=(num_classes,)), + } + config.num_cameras = 1 + config.num_classes = num_classes + classifier = Classifier(config) + + batch_size = 10 + + input = { + "observation.image": torch.rand((batch_size, 3, 128, 128)), + "next.reward": torch.rand((batch_size, num_classes)), + } + + images, labels = classifier.extract_images_and_labels(input) + assert len(images) == 1 + assert images[0].shape == torch.Size([batch_size, 3, 128, 128]) + 
assert labels.shape == torch.Size([batch_size, num_classes]) + + output = classifier.predict(images) + + assert output is not None + assert output.logits.shape == torch.Size([batch_size, num_classes]) + assert not torch.isnan(output.logits).any(), "Tensor contains NaN values" + assert output.probabilities.shape == torch.Size([batch_size, num_classes]) + assert not torch.isnan(output.probabilities).any(), "Tensor contains NaN values" + assert output.hidden_states.shape == torch.Size([batch_size, 256]) + assert not torch.isnan(output.hidden_states).any(), "Tensor contains NaN values" + + +@require_package("transformers") +def test_default_device(): + from lerobot.common.policies.sac.reward_model.modeling_classifier import Classifier + + config = RewardClassifierConfig() + assert config.device == "cpu" + + classifier = Classifier(config) + for p in classifier.parameters(): + assert p.device == torch.device("cpu") + + +@require_package("transformers") +def test_explicit_device_setup(): + from lerobot.common.policies.sac.reward_model.modeling_classifier import Classifier + + config = RewardClassifierConfig(device="cpu") + assert config.device == "cpu" + + classifier = Classifier(config) + for p in classifier.parameters(): + assert p.device == torch.device("cpu") diff --git a/tests/policies/test_sac_config.py b/tests/policies/test_sac_config.py new file mode 100644 index 0000000000..d94ee41e04 --- /dev/null +++ b/tests/policies/test_sac_config.py @@ -0,0 +1,217 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import pytest + +from lerobot.common.policies.sac.configuration_sac import ( + ActorLearnerConfig, + ActorNetworkConfig, + ConcurrencyConfig, + CriticNetworkConfig, + PolicyConfig, + SACConfig, +) +from lerobot.configs.types import FeatureType, NormalizationMode, PolicyFeature + + +def test_sac_config_default_initialization(): + config = SACConfig() + + assert config.normalization_mapping == { + "VISUAL": NormalizationMode.MEAN_STD, + "STATE": NormalizationMode.MIN_MAX, + "ENV": NormalizationMode.MIN_MAX, + "ACTION": NormalizationMode.MIN_MAX, + } + assert config.dataset_stats == { + "observation.image": { + "mean": [0.485, 0.456, 0.406], + "std": [0.229, 0.224, 0.225], + }, + "observation.state": { + "min": [0.0, 0.0], + "max": [1.0, 1.0], + }, + "action": { + "min": [0.0, 0.0, 0.0], + "max": [1.0, 1.0, 1.0], + }, + } + + # Basic parameters + assert config.device == "cpu" + assert config.storage_device == "cpu" + assert config.discount == 0.99 + assert config.temperature_init == 1.0 + assert config.num_critics == 2 + + # Architecture specifics + assert config.vision_encoder_name is None + assert config.freeze_vision_encoder is True + assert config.image_encoder_hidden_dim == 32 + assert config.shared_encoder is True + assert config.num_discrete_actions is None + assert config.image_embedding_pooling_dim == 8 + + # Training parameters + assert config.online_steps == 1000000 + assert config.online_env_seed == 10000 + assert config.online_buffer_capacity == 100000 + assert config.offline_buffer_capacity == 100000 + assert config.async_prefetch is False + assert config.online_step_before_learning == 100 + assert config.policy_update_freq == 1 + + # SAC algorithm parameters + assert config.num_subsample_critics is None + assert config.critic_lr == 3e-4 + assert config.actor_lr == 3e-4 + assert config.temperature_lr == 3e-4 + assert config.critic_target_update_weight == 0.005 + assert config.utd_ratio == 1 + assert config.state_encoder_hidden_dim == 256 + assert config.latent_dim == 256 + assert config.target_entropy is None + assert config.use_backup_entropy is True + assert config.grad_clip_norm == 40.0 + + # Dataset stats defaults + expected_dataset_stats = { + "observation.image": { + "mean": [0.485, 0.456, 0.406], + "std": [0.229, 0.224, 0.225], + }, + "observation.state": { + "min": [0.0, 0.0], + "max": [1.0, 1.0], + }, + "action": { + "min": [0.0, 0.0, 0.0], + "max": [1.0, 1.0, 1.0], + }, + } + assert config.dataset_stats == expected_dataset_stats + + # Critic network configuration + assert config.critic_network_kwargs.hidden_dims == [256, 256] + assert config.critic_network_kwargs.activate_final is True + assert config.critic_network_kwargs.final_activation is None + + # Actor network configuration + assert config.actor_network_kwargs.hidden_dims == [256, 256] + assert config.actor_network_kwargs.activate_final is True + + # Policy configuration + assert config.policy_kwargs.use_tanh_squash is True + assert config.policy_kwargs.std_min == 1e-5 + assert config.policy_kwargs.std_max == 10.0 + assert config.policy_kwargs.init_final == 0.05 + + # Discrete critic network configuration + assert config.discrete_critic_network_kwargs.hidden_dims == [256, 256] + assert config.discrete_critic_network_kwargs.activate_final is True + assert config.discrete_critic_network_kwargs.final_activation is None + + # Actor learner configuration + assert config.actor_learner_config.learner_host == "127.0.0.1" + assert config.actor_learner_config.learner_port == 50051 + assert 
config.actor_learner_config.policy_parameters_push_frequency == 4 + + # Concurrency configuration + assert config.concurrency.actor == "threads" + assert config.concurrency.learner == "threads" + + assert isinstance(config.actor_network_kwargs, ActorNetworkConfig) + assert isinstance(config.critic_network_kwargs, CriticNetworkConfig) + assert isinstance(config.policy_kwargs, PolicyConfig) + assert isinstance(config.actor_learner_config, ActorLearnerConfig) + assert isinstance(config.concurrency, ConcurrencyConfig) + + +def test_critic_network_kwargs(): + config = CriticNetworkConfig() + assert config.hidden_dims == [256, 256] + assert config.activate_final is True + assert config.final_activation is None + + +def test_actor_network_kwargs(): + config = ActorNetworkConfig() + assert config.hidden_dims == [256, 256] + assert config.activate_final is True + + +def test_policy_kwargs(): + config = PolicyConfig() + assert config.use_tanh_squash is True + assert config.std_min == 1e-5 + assert config.std_max == 10.0 + assert config.init_final == 0.05 + + +def test_actor_learner_config(): + config = ActorLearnerConfig() + assert config.learner_host == "127.0.0.1" + assert config.learner_port == 50051 + assert config.policy_parameters_push_frequency == 4 + + +def test_concurrency_config(): + config = ConcurrencyConfig() + assert config.actor == "threads" + assert config.learner == "threads" + + +def test_sac_config_custom_initialization(): + config = SACConfig( + device="cpu", + discount=0.95, + temperature_init=0.5, + num_critics=3, + ) + + assert config.device == "cpu" + assert config.discount == 0.95 + assert config.temperature_init == 0.5 + assert config.num_critics == 3 + + +def test_validate_features(): + config = SACConfig( + input_features={"observation.state": PolicyFeature(type=FeatureType.STATE, shape=(10,))}, + output_features={"action": PolicyFeature(type=FeatureType.ACTION, shape=(3,))}, + ) + config.validate_features() + + +def test_validate_features_missing_observation(): + config = SACConfig( + input_features={"wrong_key": PolicyFeature(type=FeatureType.STATE, shape=(10,))}, + output_features={"action": PolicyFeature(type=FeatureType.ACTION, shape=(3,))}, + ) + with pytest.raises( + ValueError, match="You must provide either 'observation.state' or an image observation" + ): + config.validate_features() + + +def test_validate_features_missing_action(): + config = SACConfig( + input_features={"observation.state": PolicyFeature(type=FeatureType.STATE, shape=(10,))}, + output_features={"wrong_key": PolicyFeature(type=FeatureType.ACTION, shape=(3,))}, + ) + with pytest.raises(ValueError, match="You must provide 'action' in the output features"): + config.validate_features() diff --git a/tests/policies/test_sac_policy.py b/tests/policies/test_sac_policy.py new file mode 100644 index 0000000000..e4e2dd8a99 --- /dev/null +++ b/tests/policies/test_sac_policy.py @@ -0,0 +1,541 @@ +# !/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import math + +import pytest +import torch +from torch import Tensor, nn + +from lerobot.common.policies.sac.configuration_sac import SACConfig +from lerobot.common.policies.sac.modeling_sac import MLP, SACPolicy +from lerobot.common.utils.random_utils import seeded_context, set_seed +from lerobot.configs.types import FeatureType, PolicyFeature + +try: + import transformers # noqa: F401 + + TRANSFORMERS_AVAILABLE = True +except ImportError: + TRANSFORMERS_AVAILABLE = False + + +@pytest.fixture(autouse=True) +def set_random_seed(): + seed = 42 + set_seed(seed) + + +def test_mlp_with_default_args(): + mlp = MLP(input_dim=10, hidden_dims=[256, 256]) + + x = torch.randn(10) + y = mlp(x) + assert y.shape == (256,) + + +def test_mlp_with_batch_dim(): + mlp = MLP(input_dim=10, hidden_dims=[256, 256]) + x = torch.randn(2, 10) + y = mlp(x) + assert y.shape == (2, 256) + + +def test_forward_with_empty_hidden_dims(): + mlp = MLP(input_dim=10, hidden_dims=[]) + x = torch.randn(1, 10) + assert mlp(x).shape == (1, 10) + + +def test_mlp_with_dropout(): + mlp = MLP(input_dim=10, hidden_dims=[256, 256, 11], dropout_rate=0.1) + x = torch.randn(1, 10) + y = mlp(x) + assert y.shape == (1, 11) + + drop_out_layers_count = sum(isinstance(layer, nn.Dropout) for layer in mlp.net) + assert drop_out_layers_count == 2 + + +def test_mlp_with_custom_final_activation(): + mlp = MLP(input_dim=10, hidden_dims=[256, 256], final_activation=torch.nn.Tanh()) + x = torch.randn(1, 10) + y = mlp(x) + assert y.shape == (1, 256) + assert (y >= -1).all() and (y <= 1).all() + + +def test_sac_policy_with_default_args(): + with pytest.raises(ValueError, match="should be an instance of class `PreTrainedConfig`"): + SACPolicy() + + +def create_dummy_state(batch_size: int, state_dim: int = 10) -> Tensor: + return { + "observation.state": torch.randn(batch_size, state_dim), + } + + +def create_dummy_with_visual_input(batch_size: int, state_dim: int = 10) -> Tensor: + return { + "observation.image": torch.randn(batch_size, 3, 84, 84), + "observation.state": torch.randn(batch_size, state_dim), + } + + +def create_dummy_action(batch_size: int, action_dim: int = 10) -> Tensor: + return torch.randn(batch_size, action_dim) + + +def create_default_train_batch( + batch_size: int = 8, state_dim: int = 10, action_dim: int = 10 +) -> dict[str, Tensor]: + return { + "action": create_dummy_action(batch_size, action_dim), + "reward": torch.randn(batch_size), + "state": create_dummy_state(batch_size, state_dim), + "next_state": create_dummy_state(batch_size, state_dim), + "done": torch.randn(batch_size), + } + + +def create_train_batch_with_visual_input( + batch_size: int = 8, state_dim: int = 10, action_dim: int = 10 +) -> dict[str, Tensor]: + return { + "action": create_dummy_action(batch_size, action_dim), + "reward": torch.randn(batch_size), + "state": create_dummy_with_visual_input(batch_size, state_dim), + "next_state": create_dummy_with_visual_input(batch_size, state_dim), + "done": torch.randn(batch_size), + } + + +def create_observation_batch(batch_size: int = 8, state_dim: int = 10) -> dict[str, Tensor]: + return { + "observation.state": torch.randn(batch_size, state_dim), + } + + +def create_observation_batch_with_visual_input(batch_size: int = 8, state_dim: int = 10) -> dict[str, Tensor]: + return { + "observation.state": torch.randn(batch_size, state_dim), + "observation.image": torch.randn(batch_size, 3, 84, 84), + } + + +def 
make_optimizers(policy: SACPolicy, has_discrete_action: bool = False) -> dict[str, torch.optim.Optimizer]: + """Create optimizers for the SAC policy.""" + optimizer_actor = torch.optim.Adam( + # Handle the case of shared encoder where the encoder weights are not optimized with the actor gradient + params=[ + p + for n, p in policy.actor.named_parameters() + if not policy.config.shared_encoder or not n.startswith("encoder") + ], + lr=policy.config.actor_lr, + ) + optimizer_critic = torch.optim.Adam( + params=policy.critic_ensemble.parameters(), + lr=policy.config.critic_lr, + ) + optimizer_temperature = torch.optim.Adam( + params=[policy.log_alpha], + lr=policy.config.critic_lr, + ) + + optimizers = { + "actor": optimizer_actor, + "critic": optimizer_critic, + "temperature": optimizer_temperature, + } + + if has_discrete_action: + optimizers["discrete_critic"] = torch.optim.Adam( + params=policy.discrete_critic.parameters(), + lr=policy.config.critic_lr, + ) + + return optimizers + + +def create_default_config( + state_dim: int, continuous_action_dim: int, has_discrete_action: bool = False +) -> SACConfig: + action_dim = continuous_action_dim + if has_discrete_action: + action_dim += 1 + + config = SACConfig( + input_features={"observation.state": PolicyFeature(type=FeatureType.STATE, shape=(state_dim,))}, + output_features={"action": PolicyFeature(type=FeatureType.ACTION, shape=(continuous_action_dim,))}, + dataset_stats={ + "observation.state": { + "min": [0.0] * state_dim, + "max": [1.0] * state_dim, + }, + "action": { + "min": [0.0] * continuous_action_dim, + "max": [1.0] * continuous_action_dim, + }, + }, + ) + config.validate_features() + return config + + +def create_config_with_visual_input( + state_dim: int, continuous_action_dim: int, has_discrete_action: bool = False +) -> SACConfig: + config = create_default_config( + state_dim=state_dim, + continuous_action_dim=continuous_action_dim, + has_discrete_action=has_discrete_action, + ) + config.input_features["observation.image"] = PolicyFeature(type=FeatureType.VISUAL, shape=(3, 84, 84)) + config.dataset_stats["observation.image"] = { + "mean": torch.randn(3, 1, 1), + "std": torch.randn(3, 1, 1), + } + + # Let make tests a little bit faster + config.state_encoder_hidden_dim = 32 + config.latent_dim = 32 + + config.validate_features() + return config + + +@pytest.mark.parametrize("batch_size,state_dim,action_dim", [(2, 6, 6), (1, 10, 10)]) +def test_sac_policy_with_default_config(batch_size: int, state_dim: int, action_dim: int): + batch = create_default_train_batch(batch_size=batch_size, action_dim=action_dim, state_dim=state_dim) + config = create_default_config(state_dim=state_dim, continuous_action_dim=action_dim) + + policy = SACPolicy(config=config) + policy.train() + + optimizers = make_optimizers(policy) + + cirtic_loss = policy.forward(batch, model="critic")["loss_critic"] + assert cirtic_loss.item() is not None + assert cirtic_loss.shape == () + cirtic_loss.backward() + optimizers["critic"].step() + + actor_loss = policy.forward(batch, model="actor")["loss_actor"] + assert actor_loss.item() is not None + assert actor_loss.shape == () + + actor_loss.backward() + optimizers["actor"].step() + + temperature_loss = policy.forward(batch, model="temperature")["loss_temperature"] + assert temperature_loss.item() is not None + assert temperature_loss.shape == () + + temperature_loss.backward() + optimizers["temperature"].step() + + policy.eval() + with torch.no_grad(): + observation_batch = 
create_observation_batch(batch_size=batch_size, state_dim=state_dim) + selected_action = policy.select_action(observation_batch) + assert selected_action.shape == (batch_size, action_dim) + + +@pytest.mark.parametrize("batch_size,state_dim,action_dim", [(2, 6, 6), (1, 10, 10)]) +def test_sac_policy_with_visual_input(batch_size: int, state_dim: int, action_dim: int): + config = create_config_with_visual_input(state_dim=state_dim, continuous_action_dim=action_dim) + policy = SACPolicy(config=config) + + batch = create_train_batch_with_visual_input( + batch_size=batch_size, state_dim=state_dim, action_dim=action_dim + ) + + policy.train() + + optimizers = make_optimizers(policy) + + cirtic_loss = policy.forward(batch, model="critic")["loss_critic"] + assert cirtic_loss.item() is not None + assert cirtic_loss.shape == () + cirtic_loss.backward() + optimizers["critic"].step() + + actor_loss = policy.forward(batch, model="actor")["loss_actor"] + assert actor_loss.item() is not None + assert actor_loss.shape == () + + actor_loss.backward() + optimizers["actor"].step() + + temperature_loss = policy.forward(batch, model="temperature")["loss_temperature"] + assert temperature_loss.item() is not None + assert temperature_loss.shape == () + + temperature_loss.backward() + optimizers["temperature"].step() + + policy.eval() + with torch.no_grad(): + observation_batch = create_observation_batch_with_visual_input( + batch_size=batch_size, state_dim=state_dim + ) + selected_action = policy.select_action(observation_batch) + assert selected_action.shape == (batch_size, action_dim) + + +# Let's check best candidates for pretrained encoders +@pytest.mark.parametrize( + "batch_size,state_dim,action_dim,vision_encoder_name", + [(1, 6, 6, "helper2424/resnet10"), (1, 6, 6, "facebook/convnext-base-224")], +) +@pytest.mark.skipif(not TRANSFORMERS_AVAILABLE, reason="Transformers are not installed") +def test_sac_policy_with_pretrained_encoder( + batch_size: int, state_dim: int, action_dim: int, vision_encoder_name: str +): + config = create_config_with_visual_input(state_dim=state_dim, continuous_action_dim=action_dim) + config.vision_encoder_name = vision_encoder_name + policy = SACPolicy(config=config) + policy.train() + + batch = create_train_batch_with_visual_input( + batch_size=batch_size, state_dim=state_dim, action_dim=action_dim + ) + + optimizers = make_optimizers(policy) + + cirtic_loss = policy.forward(batch, model="critic")["loss_critic"] + assert cirtic_loss.item() is not None + assert cirtic_loss.shape == () + cirtic_loss.backward() + optimizers["critic"].step() + + actor_loss = policy.forward(batch, model="actor")["loss_actor"] + assert actor_loss.item() is not None + assert actor_loss.shape == () + + +def test_sac_policy_with_shared_encoder(): + batch_size = 2 + action_dim = 10 + state_dim = 10 + config = create_config_with_visual_input(state_dim=state_dim, continuous_action_dim=action_dim) + config.shared_encoder = True + + policy = SACPolicy(config=config) + policy.train() + + batch = create_train_batch_with_visual_input( + batch_size=batch_size, state_dim=state_dim, action_dim=action_dim + ) + + policy.train() + + optimizers = make_optimizers(policy) + + cirtic_loss = policy.forward(batch, model="critic")["loss_critic"] + assert cirtic_loss.item() is not None + assert cirtic_loss.shape == () + cirtic_loss.backward() + optimizers["critic"].step() + + actor_loss = policy.forward(batch, model="actor")["loss_actor"] + assert actor_loss.item() is not None + assert actor_loss.shape == () + + 
actor_loss.backward() + optimizers["actor"].step() + + +def test_sac_policy_with_discrete_critic(): + batch_size = 2 + continuous_action_dim = 9 + full_action_dim = continuous_action_dim + 1 # the last action is discrete + state_dim = 10 + config = create_config_with_visual_input( + state_dim=state_dim, continuous_action_dim=continuous_action_dim, has_discrete_action=True + ) + + num_discrete_actions = 5 + config.num_discrete_actions = num_discrete_actions + + policy = SACPolicy(config=config) + policy.train() + + batch = create_train_batch_with_visual_input( + batch_size=batch_size, state_dim=state_dim, action_dim=full_action_dim + ) + + policy.train() + + optimizers = make_optimizers(policy, has_discrete_action=True) + + cirtic_loss = policy.forward(batch, model="critic")["loss_critic"] + assert cirtic_loss.item() is not None + assert cirtic_loss.shape == () + cirtic_loss.backward() + optimizers["critic"].step() + + discrete_critic_loss = policy.forward(batch, model="discrete_critic")["loss_discrete_critic"] + assert discrete_critic_loss.item() is not None + assert discrete_critic_loss.shape == () + discrete_critic_loss.backward() + optimizers["discrete_critic"].step() + + actor_loss = policy.forward(batch, model="actor")["loss_actor"] + assert actor_loss.item() is not None + assert actor_loss.shape == () + + actor_loss.backward() + optimizers["actor"].step() + + policy.eval() + with torch.no_grad(): + observation_batch = create_observation_batch_with_visual_input( + batch_size=batch_size, state_dim=state_dim + ) + selected_action = policy.select_action(observation_batch) + assert selected_action.shape == (batch_size, full_action_dim) + + discrete_actions = selected_action[:, -1].long() + discrete_action_values = set(discrete_actions.tolist()) + + assert all(action in range(num_discrete_actions) for action in discrete_action_values), ( + f"Discrete action {discrete_action_values} is not in range({num_discrete_actions})" + ) + + +def test_sac_policy_with_default_entropy(): + config = create_default_config(continuous_action_dim=10, state_dim=10) + policy = SACPolicy(config=config) + assert policy.target_entropy == -5.0 + + +def test_sac_policy_default_target_entropy_with_discrete_action(): + config = create_config_with_visual_input(state_dim=10, continuous_action_dim=6, has_discrete_action=True) + policy = SACPolicy(config=config) + assert policy.target_entropy == -3.0 + + +def test_sac_policy_with_predefined_entropy(): + config = create_default_config(state_dim=10, continuous_action_dim=6) + config.target_entropy = -3.5 + + policy = SACPolicy(config=config) + assert policy.target_entropy == pytest.approx(-3.5) + + +def test_sac_policy_update_temperature(): + config = create_default_config(continuous_action_dim=10, state_dim=10) + policy = SACPolicy(config=config) + + assert policy.temperature == pytest.approx(1.0) + policy.log_alpha.data = torch.tensor([math.log(0.1)]) + policy.update_temperature() + assert policy.temperature == pytest.approx(0.1) + + +def test_sac_policy_update_target_network(): + config = create_default_config(state_dim=10, continuous_action_dim=6) + config.critic_target_update_weight = 1.0 + + policy = SACPolicy(config=config) + policy.train() + + for p in policy.critic_ensemble.parameters(): + p.data = torch.ones_like(p.data) + + policy.update_target_networks() + for p in policy.critic_target.parameters(): + assert torch.allclose(p.data, torch.ones_like(p.data)), ( + f"Target network {p.data} is not equal to {torch.ones_like(p.data)}" + ) + + 
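For reference, the critic/actor/temperature tests above all repeat the same per-model update loop. A minimal sketch of that pattern, assuming only the SACPolicy.forward(batch, model=...) interface and the make_optimizers helper defined earlier in this file; the zero_grad calls and the order of the two follow-up update calls are standard practice rather than something these tests assert on:

def sac_training_step(policy, optimizers, batch):
    # One gradient step per sub-model, in the same order the tests use.
    for model, loss_key in (
        ("critic", "loss_critic"),
        ("actor", "loss_actor"),
        ("temperature", "loss_temperature"),
    ):
        loss = policy.forward(batch, model=model)[loss_key]
        optimizers[model].zero_grad()
        loss.backward()
        optimizers[model].step()
    # Keep the cached temperature and the target critics in sync afterwards.
    policy.update_temperature()
    policy.update_target_networks()
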
+@pytest.mark.parametrize("num_critics", [1, 3]) +def test_sac_policy_with_critics_number_of_heads(num_critics: int): + batch_size = 2 + action_dim = 10 + state_dim = 10 + config = create_config_with_visual_input(state_dim=state_dim, continuous_action_dim=action_dim) + config.num_critics = num_critics + + policy = SACPolicy(config=config) + policy.train() + + assert len(policy.critic_ensemble.critics) == num_critics + + batch = create_train_batch_with_visual_input( + batch_size=batch_size, state_dim=state_dim, action_dim=action_dim + ) + + policy.train() + + optimizers = make_optimizers(policy) + + cirtic_loss = policy.forward(batch, model="critic")["loss_critic"] + assert cirtic_loss.item() is not None + assert cirtic_loss.shape == () + cirtic_loss.backward() + optimizers["critic"].step() + + +def test_sac_policy_save_and_load(tmp_path): + root = tmp_path / "test_sac_save_and_load" + + state_dim = 10 + action_dim = 10 + batch_size = 2 + + config = create_default_config(state_dim=state_dim, continuous_action_dim=action_dim) + policy = SACPolicy(config=config) + policy.eval() + policy.save_pretrained(root) + loaded_policy = SACPolicy.from_pretrained(root, config=config) + loaded_policy.eval() + + batch = create_default_train_batch(batch_size=1, state_dim=10, action_dim=10) + + with torch.no_grad(): + with seeded_context(12): + # Collect policy values before saving + cirtic_loss = policy.forward(batch, model="critic")["loss_critic"] + actor_loss = policy.forward(batch, model="actor")["loss_actor"] + temperature_loss = policy.forward(batch, model="temperature")["loss_temperature"] + + observation_batch = create_observation_batch(batch_size=batch_size, state_dim=state_dim) + actions = policy.select_action(observation_batch) + + with seeded_context(12): + # Collect policy values after loading + loaded_cirtic_loss = loaded_policy.forward(batch, model="critic")["loss_critic"] + loaded_actor_loss = loaded_policy.forward(batch, model="actor")["loss_actor"] + loaded_temperature_loss = loaded_policy.forward(batch, model="temperature")["loss_temperature"] + + loaded_observation_batch = create_observation_batch(batch_size=batch_size, state_dim=state_dim) + loaded_actions = loaded_policy.select_action(loaded_observation_batch) + + assert policy.state_dict().keys() == loaded_policy.state_dict().keys() + for k in policy.state_dict(): + assert torch.allclose(policy.state_dict()[k], loaded_policy.state_dict()[k], atol=1e-6) + + # Compare values before and after saving and loading + # They should be the same + assert torch.allclose(cirtic_loss, loaded_cirtic_loss) + assert torch.allclose(actor_loss, loaded_actor_loss) + assert torch.allclose(temperature_loss, loaded_temperature_loss) + assert torch.allclose(actions, loaded_actions) diff --git a/tests/rl/test_actor.py b/tests/rl/test_actor.py new file mode 100644 index 0000000000..0cf6a8f644 --- /dev/null +++ b/tests/rl/test_actor.py @@ -0,0 +1,208 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from concurrent import futures +from unittest.mock import patch + +import pytest +import torch +from torch.multiprocessing import Event, Queue + +from lerobot.common.utils.transition import Transition +from tests.utils import require_package + + +def create_learner_service_stub(): + import grpc + + from lerobot.common.transport import services_pb2, services_pb2_grpc + + class MockLearnerService(services_pb2_grpc.LearnerServiceServicer): + def __init__(self): + self.ready_call_count = 0 + self.should_fail = False + + def Ready(self, request, context): # noqa: N802 + self.ready_call_count += 1 + if self.should_fail: + context.set_code(grpc.StatusCode.UNAVAILABLE) + context.set_details("Service unavailable") + raise grpc.RpcError("Service unavailable") + return services_pb2.Empty() + + """Fixture to start a LearnerService gRPC server and provide a connected stub.""" + + servicer = MockLearnerService() + + # Create a gRPC server and add our servicer to it. + server = grpc.server(futures.ThreadPoolExecutor(max_workers=4)) + services_pb2_grpc.add_LearnerServiceServicer_to_server(servicer, server) + port = server.add_insecure_port("[::]:0") # bind to a free port chosen by OS + server.start() # start the server (non-blocking call):contentReference[oaicite:1]{index=1} + + # Create a client channel and stub connected to the server's port. + channel = grpc.insecure_channel(f"localhost:{port}") + return services_pb2_grpc.LearnerServiceStub(channel), servicer, channel, server + + +def close_service_stub(channel, server): + channel.close() + server.stop(None) + + +@require_package("grpc") +def test_establish_learner_connection_success(): + from lerobot.scripts.rl.actor import establish_learner_connection + + """Test successful connection establishment.""" + stub, _servicer, channel, server = create_learner_service_stub() + + shutdown_event = Event() + + # Test successful connection + result = establish_learner_connection(stub, shutdown_event, attempts=5) + + assert result is True + + close_service_stub(channel, server) + + +@require_package("grpc") +def test_establish_learner_connection_failure(): + from lerobot.scripts.rl.actor import establish_learner_connection + + """Test connection failure.""" + stub, servicer, channel, server = create_learner_service_stub() + servicer.should_fail = True + + shutdown_event = Event() + + # Test failed connection + with patch("time.sleep"): # Speed up the test + result = establish_learner_connection(stub, shutdown_event, attempts=2) + + assert result is False + + close_service_stub(channel, server) + + +@require_package("grpc") +def test_push_transitions_to_transport_queue(): + from lerobot.common.transport.utils import bytes_to_transitions + from lerobot.scripts.rl.actor import push_transitions_to_transport_queue + from tests.transport.test_transport_utils import assert_transitions_equal + + """Test pushing transitions to transport queue.""" + # Create mock transitions + transitions = [] + for i in range(3): + transition = Transition( + state={"observation": torch.randn(3, 64, 64), "state": torch.randn(10)}, + action=torch.randn(5), + reward=torch.tensor(1.0 + i), + done=torch.tensor(False), + truncated=torch.tensor(False), + next_state={"observation": torch.randn(3, 64, 64), "state": torch.randn(10)}, + complementary_info={"step": torch.tensor(i)}, + ) + transitions.append(transition) + + transitions_queue = Queue() + + # Test pushing transitions + 
push_transitions_to_transport_queue(transitions, transitions_queue) + + # Verify the data can be retrieved + serialized_data = transitions_queue.get() + assert isinstance(serialized_data, bytes) + deserialized_transitions = bytes_to_transitions(serialized_data) + assert len(deserialized_transitions) == len(transitions) + for i, deserialized_transition in enumerate(deserialized_transitions): + assert_transitions_equal(deserialized_transition, transitions[i]) + + +@require_package("grpc") +@pytest.mark.timeout(3) # force cross-platform watchdog +def test_transitions_stream(): + from lerobot.scripts.rl.actor import transitions_stream + + """Test transitions stream functionality.""" + shutdown_event = Event() + transitions_queue = Queue() + + # Add test data to queue + test_data = [b"transition_data_1", b"transition_data_2", b"transition_data_3"] + for data in test_data: + transitions_queue.put(data) + + # Collect streamed data + streamed_data = [] + stream_generator = transitions_stream(shutdown_event, transitions_queue, 0.1) + + # Process a few items + for i, message in enumerate(stream_generator): + streamed_data.append(message) + if i >= len(test_data) - 1: + shutdown_event.set() + break + + # Verify we got messages + assert len(streamed_data) == len(test_data) + assert streamed_data[0].data == b"transition_data_1" + assert streamed_data[1].data == b"transition_data_2" + assert streamed_data[2].data == b"transition_data_3" + + +@require_package("grpc") +@pytest.mark.timeout(3) # force cross-platform watchdog +def test_interactions_stream(): + from lerobot.common.transport.utils import bytes_to_python_object, python_object_to_bytes + from lerobot.scripts.rl.actor import interactions_stream + + """Test interactions stream functionality.""" + shutdown_event = Event() + interactions_queue = Queue() + + # Create test interaction data (similar structure to what would be sent) + test_interactions = [ + {"episode_reward": 10.5, "step": 1, "policy_fps": 30.2}, + {"episode_reward": 15.2, "step": 2, "policy_fps": 28.7}, + {"episode_reward": 8.7, "step": 3, "policy_fps": 29.1}, + ] + + # Serialize the interaction data as it would be in practice + test_data = [ + interactions_queue.put(python_object_to_bytes(interaction)) for interaction in test_interactions + ] + + # Collect streamed data + streamed_data = [] + stream_generator = interactions_stream(shutdown_event, interactions_queue, 0.1) + + # Process the items + for i, message in enumerate(stream_generator): + streamed_data.append(message) + if i >= len(test_data) - 1: + shutdown_event.set() + break + + # Verify we got messages + assert len(streamed_data) == len(test_data) + + # Verify the messages can be deserialized back to original data + for i, message in enumerate(streamed_data): + deserialized_interaction = bytes_to_python_object(message.data) + assert deserialized_interaction == test_interactions[i] diff --git a/tests/rl/test_actor_learner.py b/tests/rl/test_actor_learner.py new file mode 100644 index 0000000000..cb72da7e40 --- /dev/null +++ b/tests/rl/test_actor_learner.py @@ -0,0 +1,297 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import socket +import threading +import time + +import pytest +import torch +from torch.multiprocessing import Event, Queue + +from lerobot.common.policies.sac.configuration_sac import SACConfig +from lerobot.common.utils.transition import Transition +from lerobot.configs.train import TrainRLServerPipelineConfig +from tests.utils import require_package + + +def create_test_transitions(count: int = 3) -> list[Transition]: + """Create test transitions for integration testing.""" + transitions = [] + for i in range(count): + transition = Transition( + state={"observation": torch.randn(3, 64, 64), "state": torch.randn(10)}, + action=torch.randn(5), + reward=torch.tensor(1.0 + i), + done=torch.tensor(i == count - 1), # Last transition is done + truncated=torch.tensor(False), + next_state={"observation": torch.randn(3, 64, 64), "state": torch.randn(10)}, + complementary_info={"step": torch.tensor(i), "episode_id": i // 2}, + ) + transitions.append(transition) + return transitions + + +def create_test_interactions(count: int = 3) -> list[dict]: + """Create test interactions for integration testing.""" + interactions = [] + for i in range(count): + interaction = { + "episode_reward": 10.0 + i * 5, + "step": i * 100, + "policy_fps": 30.0 + i, + "intervention_rate": 0.1 * i, + "episode_length": 200 + i * 50, + } + interactions.append(interaction) + return interactions + + +def find_free_port(): + """Finds a free port on the local machine.""" + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + s.bind(("", 0)) # Bind to port 0 to let the OS choose a free port + s.listen(1) + port = s.getsockname()[1] + return port + + +@pytest.fixture +def cfg(): + cfg = TrainRLServerPipelineConfig() + + port = find_free_port() + + policy_cfg = SACConfig() + policy_cfg.actor_learner_config.learner_host = "127.0.0.1" + policy_cfg.actor_learner_config.learner_port = port + policy_cfg.concurrency.actor = "threads" + policy_cfg.concurrency.learner = "threads" + policy_cfg.actor_learner_config.queue_get_timeout = 0.1 + + cfg.policy = policy_cfg + + return cfg + + +@require_package("grpc") +@pytest.mark.timeout(10) # force cross-platform watchdog +def test_end_to_end_transitions_flow(cfg): + from lerobot.common.transport.utils import bytes_to_transitions + from lerobot.scripts.rl.actor import ( + establish_learner_connection, + learner_service_client, + push_transitions_to_transport_queue, + send_transitions, + ) + from lerobot.scripts.rl.learner import start_learner + from tests.transport.test_transport_utils import assert_transitions_equal + + """Test complete transitions flow from actor to learner.""" + transitions_actor_queue = Queue() + transitions_learner_queue = Queue() + + interactions_queue = Queue() + parameters_queue = Queue() + shutdown_event = Event() + + learner_thread = threading.Thread( + target=start_learner, + args=(parameters_queue, transitions_learner_queue, interactions_queue, shutdown_event, cfg), + ) + learner_thread.start() + + policy_cfg = cfg.policy + learner_client, channel = learner_service_client( + host=policy_cfg.actor_learner_config.learner_host, 
port=policy_cfg.actor_learner_config.learner_port + ) + + assert establish_learner_connection(learner_client, shutdown_event, attempts=5) + + send_transitions_thread = threading.Thread( + target=send_transitions, args=(cfg, transitions_actor_queue, shutdown_event, learner_client, channel) + ) + send_transitions_thread.start() + + input_transitions = create_test_transitions(count=5) + + push_transitions_to_transport_queue(input_transitions, transitions_actor_queue) + + # Wait for learner to start + time.sleep(0.1) + + shutdown_event.set() + + # Wait for learner to receive transitions + learner_thread.join() + send_transitions_thread.join() + channel.close() + + received_transitions = [] + while not transitions_learner_queue.empty(): + received_transitions.extend(bytes_to_transitions(transitions_learner_queue.get())) + + assert len(received_transitions) == len(input_transitions) + for i, transition in enumerate(received_transitions): + assert_transitions_equal(transition, input_transitions[i]) + + +@require_package("grpc") +@pytest.mark.timeout(10) +def test_end_to_end_interactions_flow(cfg): + from lerobot.common.transport.utils import bytes_to_python_object, python_object_to_bytes + from lerobot.scripts.rl.actor import ( + establish_learner_connection, + learner_service_client, + send_interactions, + ) + from lerobot.scripts.rl.learner import start_learner + + """Test complete interactions flow from actor to learner.""" + # Queues for actor-learner communication + interactions_actor_queue = Queue() + interactions_learner_queue = Queue() + + # Other queues required by the learner + parameters_queue = Queue() + transitions_learner_queue = Queue() + + shutdown_event = Event() + + # Start the learner in a separate thread + learner_thread = threading.Thread( + target=start_learner, + args=(parameters_queue, transitions_learner_queue, interactions_learner_queue, shutdown_event, cfg), + ) + learner_thread.start() + + # Establish connection from actor to learner + policy_cfg = cfg.policy + learner_client, channel = learner_service_client( + host=policy_cfg.actor_learner_config.learner_host, port=policy_cfg.actor_learner_config.learner_port + ) + + assert establish_learner_connection(learner_client, shutdown_event, attempts=5) + + # Start the actor's interaction sending process in a separate thread + send_interactions_thread = threading.Thread( + target=send_interactions, + args=(cfg, interactions_actor_queue, shutdown_event, learner_client, channel), + ) + send_interactions_thread.start() + + # Create and push test interactions to the actor's queue + input_interactions = create_test_interactions(count=5) + for interaction in input_interactions: + interactions_actor_queue.put(python_object_to_bytes(interaction)) + + # Wait for the communication to happen + time.sleep(0.1) + + # Signal shutdown and wait for threads to complete + shutdown_event.set() + learner_thread.join() + send_interactions_thread.join() + channel.close() + + # Verify that the learner received the interactions + received_interactions = [] + while not interactions_learner_queue.empty(): + received_interactions.append(bytes_to_python_object(interactions_learner_queue.get())) + + assert len(received_interactions) == len(input_interactions) + + # Sort by a unique key to handle potential reordering in queues + received_interactions.sort(key=lambda x: x["step"]) + input_interactions.sort(key=lambda x: x["step"]) + + for received, expected in zip(received_interactions, input_interactions, strict=False): + assert received == expected + + 
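The parameter-flow test below moves a state dict from learner to actor as raw bytes. A short sketch of the serialization round trip it relies on, using the state_to_bytes / bytes_to_state_dict helpers that the test imports (the tensor shape and key are taken from the small-data case; payloads above the roughly 2 MB chunk size are simply split and reassembled by the transport layer):

import torch

from lerobot.common.transport.utils import bytes_to_state_dict, state_to_bytes

params = {"layer.weight": torch.randn(128, 64)}  # small enough for a single chunk
payload = state_to_bytes(params)                 # plain bytes, safe to put on a Queue or a gRPC stream
restored = bytes_to_state_dict(payload)
assert restored.keys() == params.keys()
assert torch.allclose(restored["layer.weight"], params["layer.weight"])
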
+@require_package("grpc") +@pytest.mark.parametrize("data_size", ["small", "large"]) +@pytest.mark.timeout(10) +def test_end_to_end_parameters_flow(cfg, data_size): + from lerobot.common.transport.utils import bytes_to_state_dict, state_to_bytes + from lerobot.scripts.rl.actor import establish_learner_connection, learner_service_client, receive_policy + from lerobot.scripts.rl.learner import start_learner + + """Test complete parameter flow from learner to actor, with small and large data.""" + # Actor's local queue to receive params + parameters_actor_queue = Queue() + # Learner's queue to send params from + parameters_learner_queue = Queue() + + # Other queues required by the learner + transitions_learner_queue = Queue() + interactions_learner_queue = Queue() + + shutdown_event = Event() + + # Start the learner in a separate thread + learner_thread = threading.Thread( + target=start_learner, + args=( + parameters_learner_queue, + transitions_learner_queue, + interactions_learner_queue, + shutdown_event, + cfg, + ), + ) + learner_thread.start() + + # Establish connection from actor to learner + policy_cfg = cfg.policy + learner_client, channel = learner_service_client( + host=policy_cfg.actor_learner_config.learner_host, port=policy_cfg.actor_learner_config.learner_port + ) + + assert establish_learner_connection(learner_client, shutdown_event, attempts=5) + + # Start the actor's parameter receiving process in a separate thread + receive_params_thread = threading.Thread( + target=receive_policy, + args=(cfg, parameters_actor_queue, shutdown_event, learner_client, channel), + ) + receive_params_thread.start() + + # Create test parameters based on parametrization + if data_size == "small": + input_params = {"layer.weight": torch.randn(128, 64)} + else: # "large" + # CHUNK_SIZE is 2MB, so this tensor (4MB) will force chunking + input_params = {"large_layer.weight": torch.randn(1024, 1024)} + + # Simulate learner having new parameters to send + parameters_learner_queue.put(state_to_bytes(input_params)) + + # Wait for the actor to receive the parameters + time.sleep(0.1) + + # Signal shutdown and wait for threads to complete + shutdown_event.set() + learner_thread.join() + receive_params_thread.join() + channel.close() + + # Verify that the actor received the parameters correctly + received_params = bytes_to_state_dict(parameters_actor_queue.get()) + + assert received_params.keys() == input_params.keys() + for key in input_params: + assert torch.allclose(received_params[key], input_params[key]) diff --git a/tests/rl/test_learner_service.py b/tests/rl/test_learner_service.py new file mode 100644 index 0000000000..ee9d06e914 --- /dev/null +++ b/tests/rl/test_learner_service.py @@ -0,0 +1,374 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
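The service tests in this file, like the lower-level transport tests further below, exercise the same chunked streaming convention: a payload is cut into TRANSFER_BEGIN / TRANSFER_MIDDLE / TRANSFER_END messages and reassembled on the receiving side. A minimal round-trip sketch, assuming only the send_bytes_in_chunks / receive_bytes_in_chunks helpers and the roughly 2 MB chunk size referenced in these tests:

from multiprocessing import Event, Queue

from lerobot.common.transport import services_pb2
from lerobot.common.transport.utils import receive_bytes_in_chunks, send_bytes_in_chunks

payload = b"x" * (5 * 1024 * 1024)  # larger than one chunk, so it is split into BEGIN/MIDDLE/END messages
chunks = list(send_bytes_in_chunks(payload, services_pb2.InteractionMessage))
assert len(chunks) > 1

out_queue, shutdown = Queue(), Event()
receive_bytes_in_chunks(iter(chunks), out_queue, shutdown)  # reassembles and enqueues the full payload
assert out_queue.get(timeout=1) == payload
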
+import threading +import time +from concurrent import futures +from multiprocessing import Event, Queue + +import pytest + +from tests.utils import require_package # our gRPC servicer class + + +@pytest.fixture(scope="function") +def learner_service_stub(): + shutdown_event = Event() + parameters_queue = Queue() + transitions_queue = Queue() + interactions_queue = Queue() + seconds_between_pushes = 1 + client, channel, server = create_learner_service_stub( + shutdown_event, parameters_queue, transitions_queue, interactions_queue, seconds_between_pushes + ) + + yield client # provide the stub to the test function + + close_learner_service_stub(channel, server) + + +@require_package("grpc") +def create_learner_service_stub( + shutdown_event: Event, + parameters_queue: Queue, + transitions_queue: Queue, + interactions_queue: Queue, + seconds_between_pushes: int, + queue_get_timeout: float = 0.1, +): + import grpc + + from lerobot.common.transport import services_pb2_grpc # generated from .proto + from lerobot.scripts.rl.learner_service import LearnerService + + """Fixture to start a LearnerService gRPC server and provide a connected stub.""" + + servicer = LearnerService( + shutdown_event=shutdown_event, + parameters_queue=parameters_queue, + seconds_between_pushes=seconds_between_pushes, + transition_queue=transitions_queue, + interaction_message_queue=interactions_queue, + queue_get_timeout=queue_get_timeout, + ) + + # Create a gRPC server and add our servicer to it. + server = grpc.server(futures.ThreadPoolExecutor(max_workers=4)) + services_pb2_grpc.add_LearnerServiceServicer_to_server(servicer, server) + port = server.add_insecure_port("[::]:0") # bind to a free port chosen by OS + server.start() # start the server (non-blocking call):contentReference[oaicite:1]{index=1} + + # Create a client channel and stub connected to the server's port. 
+ channel = grpc.insecure_channel(f"localhost:{port}") + return services_pb2_grpc.LearnerServiceStub(channel), channel, server + + +@require_package("grpc") +def close_learner_service_stub(channel, server): + channel.close() + server.stop(None) + + +@pytest.mark.timeout(3) # force cross-platform watchdog +def test_ready_method(learner_service_stub): + from lerobot.common.transport import services_pb2 + + """Test the ready method of the UserService.""" + request = services_pb2.Empty() + response = learner_service_stub.Ready(request) + assert response == services_pb2.Empty() + + +@require_package("grpc") +@pytest.mark.timeout(3) # force cross-platform watchdog +def test_send_interactions(): + from lerobot.common.transport import services_pb2 + + shutdown_event = Event() + + parameters_queue = Queue() + transitions_queue = Queue() + interactions_queue = Queue() + seconds_between_pushes = 1 + client, channel, server = create_learner_service_stub( + shutdown_event, parameters_queue, transitions_queue, interactions_queue, seconds_between_pushes + ) + + list_of_interaction_messages = [ + services_pb2.InteractionMessage(transfer_state=services_pb2.TransferState.TRANSFER_BEGIN, data=b"1"), + services_pb2.InteractionMessage(transfer_state=services_pb2.TransferState.TRANSFER_MIDDLE, data=b"2"), + services_pb2.InteractionMessage(transfer_state=services_pb2.TransferState.TRANSFER_END, data=b"3"), + services_pb2.InteractionMessage(transfer_state=services_pb2.TransferState.TRANSFER_END, data=b"4"), + services_pb2.InteractionMessage(transfer_state=services_pb2.TransferState.TRANSFER_END, data=b"5"), + services_pb2.InteractionMessage(transfer_state=services_pb2.TransferState.TRANSFER_BEGIN, data=b"6"), + services_pb2.InteractionMessage(transfer_state=services_pb2.TransferState.TRANSFER_MIDDLE, data=b"7"), + services_pb2.InteractionMessage(transfer_state=services_pb2.TransferState.TRANSFER_END, data=b"8"), + ] + + def mock_intercations_stream(): + yield from list_of_interaction_messages + + return services_pb2.Empty() + + response = client.SendInteractions(mock_intercations_stream()) + assert response == services_pb2.Empty() + + close_learner_service_stub(channel, server) + + # Extract the data from the interactions queue + interactions = [] + while not interactions_queue.empty(): + interactions.append(interactions_queue.get()) + + assert interactions == [b"123", b"4", b"5", b"678"] + + +@require_package("grpc") +@pytest.mark.timeout(3) # force cross-platform watchdog +def test_send_transitions(): + from lerobot.common.transport import services_pb2 + + """Test the SendTransitions method with various transition data.""" + shutdown_event = Event() + parameters_queue = Queue() + transitions_queue = Queue() + interactions_queue = Queue() + seconds_between_pushes = 1 + + client, channel, server = create_learner_service_stub( + shutdown_event, parameters_queue, transitions_queue, interactions_queue, seconds_between_pushes + ) + + # Create test transition messages + list_of_transition_messages = [ + services_pb2.Transition( + transfer_state=services_pb2.TransferState.TRANSFER_BEGIN, data=b"transition_1" + ), + services_pb2.Transition( + transfer_state=services_pb2.TransferState.TRANSFER_MIDDLE, data=b"transition_2" + ), + services_pb2.Transition(transfer_state=services_pb2.TransferState.TRANSFER_END, data=b"transition_3"), + services_pb2.Transition(transfer_state=services_pb2.TransferState.TRANSFER_BEGIN, data=b"batch_1"), + services_pb2.Transition(transfer_state=services_pb2.TransferState.TRANSFER_END, 
data=b"batch_2"), + ] + + def mock_transitions_stream(): + yield from list_of_transition_messages + + response = client.SendTransitions(mock_transitions_stream()) + assert response == services_pb2.Empty() + + close_learner_service_stub(channel, server) + + # Extract the data from the transitions queue + transitions = [] + while not transitions_queue.empty(): + transitions.append(transitions_queue.get()) + + # Should have assembled the chunked data + assert transitions == [b"transition_1transition_2transition_3", b"batch_1batch_2"] + + +@require_package("grpc") +@pytest.mark.timeout(3) # force cross-platform watchdog +def test_send_transitions_empty_stream(): + from lerobot.common.transport import services_pb2 + + """Test SendTransitions with empty stream.""" + shutdown_event = Event() + parameters_queue = Queue() + transitions_queue = Queue() + interactions_queue = Queue() + seconds_between_pushes = 1 + + client, channel, server = create_learner_service_stub( + shutdown_event, parameters_queue, transitions_queue, interactions_queue, seconds_between_pushes + ) + + def empty_stream(): + return iter([]) + + response = client.SendTransitions(empty_stream()) + assert response == services_pb2.Empty() + + close_learner_service_stub(channel, server) + + # Queue should remain empty + assert transitions_queue.empty() + + +@require_package("grpc") +@pytest.mark.timeout(10) # force cross-platform watchdog +def test_stream_parameters(): + import time + + from lerobot.common.transport import services_pb2 + + """Test the StreamParameters method.""" + shutdown_event = Event() + parameters_queue = Queue() + transitions_queue = Queue() + interactions_queue = Queue() + seconds_between_pushes = 0.2 # Short delay for testing + + client, channel, server = create_learner_service_stub( + shutdown_event, parameters_queue, transitions_queue, interactions_queue, seconds_between_pushes + ) + + # Add test parameters to the queue + test_params = [b"param_batch_1", b"param_batch_2"] + for param in test_params: + parameters_queue.put(param) + + # Start streaming parameters + request = services_pb2.Empty() + stream = client.StreamParameters(request) + + # Collect streamed parameters and timestamps + received_params = [] + timestamps = [] + + for response in stream: + received_params.append(response.data) + timestamps.append(time.time()) + + # We should receive one last item + break + + parameters_queue.put(b"param_batch_3") + + for response in stream: + received_params.append(response.data) + timestamps.append(time.time()) + + # We should receive only one item + break + + shutdown_event.set() + close_learner_service_stub(channel, server) + + assert received_params == [b"param_batch_2", b"param_batch_3"] + + # Check the time difference between the two sends + time_diff = timestamps[1] - timestamps[0] + # Check if the time difference is close to the expected push frequency + assert time_diff == pytest.approx(seconds_between_pushes, abs=0.1) + + +@require_package("grpc") +@pytest.mark.timeout(3) # force cross-platform watchdog +def test_stream_parameters_with_shutdown(): + from lerobot.common.transport import services_pb2 + + """Test StreamParameters handles shutdown gracefully.""" + shutdown_event = Event() + parameters_queue = Queue() + transitions_queue = Queue() + interactions_queue = Queue() + seconds_between_pushes = 0.1 + queue_get_timeout = 0.001 + + client, channel, server = create_learner_service_stub( + shutdown_event, + parameters_queue, + transitions_queue, + interactions_queue, + seconds_between_pushes, + 
queue_get_timeout=queue_get_timeout, + ) + + test_params = [b"param_batch_1", b"stop", b"param_batch_3", b"param_batch_4"] + + # create a thread that will put the parameters in the queue + def producer(): + for param in test_params: + parameters_queue.put(param) + time.sleep(0.1) + + producer_thread = threading.Thread(target=producer) + producer_thread.start() + + # Start streaming + request = services_pb2.Empty() + stream = client.StreamParameters(request) + + # Collect streamed parameters + received_params = [] + + for response in stream: + received_params.append(response.data) + + if response.data == b"stop": + shutdown_event.set() + + producer_thread.join() + close_learner_service_stub(channel, server) + + assert received_params == [b"param_batch_1", b"stop"] + + +@require_package("grpc") +@pytest.mark.timeout(3) # force cross-platform watchdog +def test_stream_parameters_waits_and_retries_on_empty_queue(): + import threading + import time + + from lerobot.common.transport import services_pb2 + + """Test that StreamParameters waits and retries when the queue is empty.""" + shutdown_event = Event() + parameters_queue = Queue() + transitions_queue = Queue() + interactions_queue = Queue() + seconds_between_pushes = 0.05 + queue_get_timeout = 0.01 + + client, channel, server = create_learner_service_stub( + shutdown_event, + parameters_queue, + transitions_queue, + interactions_queue, + seconds_between_pushes, + queue_get_timeout=queue_get_timeout, + ) + + request = services_pb2.Empty() + stream = client.StreamParameters(request) + + received_params = [] + + def producer(): + # Let the consumer start and find an empty queue. + # It will wait `seconds_between_pushes` (0.05s), then `get` will timeout after `queue_get_timeout` (0.01s). + # Total time for the first empty loop is > 0.06s. We wait a bit longer to be safe. + time.sleep(0.06) + parameters_queue.put(b"param_after_wait") + time.sleep(0.05) + parameters_queue.put(b"param_after_wait_2") + + producer_thread = threading.Thread(target=producer) + producer_thread.start() + + # The consumer will block here until the producer sends an item. + for response in stream: + received_params.append(response.data) + if response.data == b"param_after_wait_2": + break # We only need one item for this test. + + shutdown_event.set() + producer_thread.join() + close_learner_service_stub(channel, server) + + assert received_params == [b"param_after_wait", b"param_after_wait_2"] diff --git a/tests/transport/test_transport_utils.py b/tests/transport/test_transport_utils.py new file mode 100644 index 0000000000..cf33f52c04 --- /dev/null +++ b/tests/transport/test_transport_utils.py @@ -0,0 +1,571 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import io +from multiprocessing import Event, Queue +from pickle import UnpicklingError + +import pytest +import torch + +from lerobot.common.utils.transition import Transition +from tests.utils import require_cuda, require_package + + +@require_package("grpc") +def test_bytes_buffer_size_empty_buffer(): + from lerobot.common.transport.utils import bytes_buffer_size + + """Test with an empty buffer.""" + buffer = io.BytesIO() + assert bytes_buffer_size(buffer) == 0 + # Ensure position is reset to beginning + assert buffer.tell() == 0 + + +@require_package("grpc") +def test_bytes_buffer_size_small_buffer(): + from lerobot.common.transport.utils import bytes_buffer_size + + """Test with a small buffer.""" + buffer = io.BytesIO(b"Hello, World!") + assert bytes_buffer_size(buffer) == 13 + assert buffer.tell() == 0 + + +@require_package("grpc") +def test_bytes_buffer_size_large_buffer(): + from lerobot.common.transport.utils import CHUNK_SIZE, bytes_buffer_size + + """Test with a large buffer.""" + data = b"x" * (CHUNK_SIZE * 2 + 1000) + buffer = io.BytesIO(data) + assert bytes_buffer_size(buffer) == len(data) + assert buffer.tell() == 0 + + +@require_package("grpc") +def test_send_bytes_in_chunks_empty_data(): + from lerobot.common.transport.utils import send_bytes_in_chunks, services_pb2 + + """Test sending empty data.""" + message_class = services_pb2.InteractionMessage + chunks = list(send_bytes_in_chunks(b"", message_class)) + assert len(chunks) == 0 + + +@require_package("grpc") +def test_single_chunk_small_data(): + from lerobot.common.transport.utils import send_bytes_in_chunks, services_pb2 + + """Test data that fits in a single chunk.""" + data = b"Some data" + message_class = services_pb2.InteractionMessage + chunks = list(send_bytes_in_chunks(data, message_class)) + + assert len(chunks) == 1 + assert chunks[0].data == b"Some data" + assert chunks[0].transfer_state == services_pb2.TransferState.TRANSFER_END + + +@require_package("grpc") +def test_not_silent_mode(): + from lerobot.common.transport.utils import send_bytes_in_chunks, services_pb2 + + """Test not silent mode.""" + data = b"Some data" + message_class = services_pb2.InteractionMessage + chunks = list(send_bytes_in_chunks(data, message_class, silent=False)) + assert len(chunks) == 1 + assert chunks[0].data == b"Some data" + + +@require_package("grpc") +def test_send_bytes_in_chunks_large_data(): + from lerobot.common.transport.utils import CHUNK_SIZE, send_bytes_in_chunks, services_pb2 + + """Test sending large data.""" + data = b"x" * (CHUNK_SIZE * 2 + 1000) + message_class = services_pb2.InteractionMessage + chunks = list(send_bytes_in_chunks(data, message_class)) + assert len(chunks) == 3 + assert chunks[0].data == b"x" * CHUNK_SIZE + assert chunks[0].transfer_state == services_pb2.TransferState.TRANSFER_BEGIN + assert chunks[1].data == b"x" * CHUNK_SIZE + assert chunks[1].transfer_state == services_pb2.TransferState.TRANSFER_MIDDLE + assert chunks[2].data == b"x" * 1000 + assert chunks[2].transfer_state == services_pb2.TransferState.TRANSFER_END + + +@require_package("grpc") +def test_send_bytes_in_chunks_large_data_with_exact_chunk_size(): + from lerobot.common.transport.utils import CHUNK_SIZE, send_bytes_in_chunks, services_pb2 + + """Test sending large data with exact chunk size.""" + data = b"x" * CHUNK_SIZE + message_class = services_pb2.InteractionMessage + chunks = list(send_bytes_in_chunks(data, message_class)) + assert len(chunks) == 1 + assert chunks[0].data == data + assert chunks[0].transfer_state == 
services_pb2.TransferState.TRANSFER_END + + +@require_package("grpc") +def test_receive_bytes_in_chunks_empty_data(): + from lerobot.common.transport.utils import receive_bytes_in_chunks + + """Test receiving empty data.""" + queue = Queue() + shutdown_event = Event() + + # Empty iterator + receive_bytes_in_chunks(iter([]), queue, shutdown_event) + + assert queue.empty() + + +@require_package("grpc") +def test_receive_bytes_in_chunks_single_chunk(): + from lerobot.common.transport.utils import receive_bytes_in_chunks, services_pb2 + + """Test receiving a single chunk message.""" + queue = Queue() + shutdown_event = Event() + + data = b"Single chunk data" + chunks = [ + services_pb2.InteractionMessage(data=data, transfer_state=services_pb2.TransferState.TRANSFER_END) + ] + + receive_bytes_in_chunks(iter(chunks), queue, shutdown_event) + + assert queue.get(timeout=0.01) == data + assert queue.empty() + + +@require_package("grpc") +def test_receive_bytes_in_chunks_single_not_end_chunk(): + from lerobot.common.transport.utils import receive_bytes_in_chunks, services_pb2 + + """Test receiving a single chunk message.""" + queue = Queue() + shutdown_event = Event() + + data = b"Single chunk data" + chunks = [ + services_pb2.InteractionMessage(data=data, transfer_state=services_pb2.TransferState.TRANSFER_MIDDLE) + ] + + receive_bytes_in_chunks(iter(chunks), queue, shutdown_event) + + assert queue.empty() + + +@require_package("grpc") +def test_receive_bytes_in_chunks_multiple_chunks(): + from lerobot.common.transport.utils import receive_bytes_in_chunks, services_pb2 + + """Test receiving a multi-chunk message.""" + queue = Queue() + shutdown_event = Event() + + chunks = [ + services_pb2.InteractionMessage( + data=b"First ", transfer_state=services_pb2.TransferState.TRANSFER_BEGIN + ), + services_pb2.InteractionMessage( + data=b"Middle ", transfer_state=services_pb2.TransferState.TRANSFER_MIDDLE + ), + services_pb2.InteractionMessage(data=b"Last", transfer_state=services_pb2.TransferState.TRANSFER_END), + ] + + receive_bytes_in_chunks(iter(chunks), queue, shutdown_event) + + assert queue.get(timeout=0.01) == b"First Middle Last" + assert queue.empty() + + +@require_package("grpc") +def test_receive_bytes_in_chunks_multiple_messages(): + from lerobot.common.transport.utils import receive_bytes_in_chunks, services_pb2 + + """Test receiving multiple complete messages in sequence.""" + queue = Queue() + shutdown_event = Event() + + chunks = [ + # First message - single chunk + services_pb2.InteractionMessage( + data=b"Message1", transfer_state=services_pb2.TransferState.TRANSFER_END + ), + # Second message - multi chunk + services_pb2.InteractionMessage( + data=b"Start2 ", transfer_state=services_pb2.TransferState.TRANSFER_BEGIN + ), + services_pb2.InteractionMessage( + data=b"Middle2 ", transfer_state=services_pb2.TransferState.TRANSFER_MIDDLE + ), + services_pb2.InteractionMessage(data=b"End2", transfer_state=services_pb2.TransferState.TRANSFER_END), + # Third message - single chunk + services_pb2.InteractionMessage( + data=b"Message3", transfer_state=services_pb2.TransferState.TRANSFER_END + ), + ] + + receive_bytes_in_chunks(iter(chunks), queue, shutdown_event) + + # Should have three messages in queue + assert queue.get(timeout=0.01) == b"Message1" + assert queue.get(timeout=0.01) == b"Start2 Middle2 End2" + assert queue.get(timeout=0.01) == b"Message3" + assert queue.empty() + + +@require_package("grpc") +def test_receive_bytes_in_chunks_shutdown_during_receive(): + from 
lerobot.common.transport.utils import receive_bytes_in_chunks, services_pb2 + + """Test that shutdown event stops receiving mid-stream.""" + queue = Queue() + shutdown_event = Event() + shutdown_event.set() + + chunks = [ + services_pb2.InteractionMessage( + data=b"First ", transfer_state=services_pb2.TransferState.TRANSFER_BEGIN + ), + services_pb2.InteractionMessage( + data=b"Middle ", transfer_state=services_pb2.TransferState.TRANSFER_MIDDLE + ), + services_pb2.InteractionMessage(data=b"Last", transfer_state=services_pb2.TransferState.TRANSFER_END), + ] + + receive_bytes_in_chunks(iter(chunks), queue, shutdown_event) + + assert queue.empty() + + +@require_package("grpc") +def test_receive_bytes_in_chunks_only_begin_chunk(): + from lerobot.common.transport.utils import receive_bytes_in_chunks, services_pb2 + + """Test receiving only a BEGIN chunk without END.""" + queue = Queue() + shutdown_event = Event() + + chunks = [ + services_pb2.InteractionMessage( + data=b"Start", transfer_state=services_pb2.TransferState.TRANSFER_BEGIN + ), + # No END chunk + ] + + receive_bytes_in_chunks(iter(chunks), queue, shutdown_event) + + assert queue.empty() + + +@require_package("grpc") +def test_receive_bytes_in_chunks_missing_begin(): + from lerobot.common.transport.utils import receive_bytes_in_chunks, services_pb2 + + """Test receiving chunks starting with MIDDLE instead of BEGIN.""" + queue = Queue() + shutdown_event = Event() + + chunks = [ + # Missing BEGIN + services_pb2.InteractionMessage( + data=b"Middle", transfer_state=services_pb2.TransferState.TRANSFER_MIDDLE + ), + services_pb2.InteractionMessage(data=b"End", transfer_state=services_pb2.TransferState.TRANSFER_END), + ] + + receive_bytes_in_chunks(iter(chunks), queue, shutdown_event) + + # The implementation continues from where it is, so we should get partial data + assert queue.get(timeout=0.01) == b"MiddleEnd" + assert queue.empty() + + +# Tests for state_to_bytes and bytes_to_state_dict +@require_package("grpc") +def test_state_to_bytes_empty_dict(): + from lerobot.common.transport.utils import bytes_to_state_dict, state_to_bytes + + """Test converting empty state dict to bytes.""" + state_dict = {} + data = state_to_bytes(state_dict) + reconstructed = bytes_to_state_dict(data) + assert reconstructed == state_dict + + +@require_package("grpc") +def test_bytes_to_state_dict_empty_data(): + from lerobot.common.transport.utils import bytes_to_state_dict + + """Test converting empty data to state dict.""" + with pytest.raises(EOFError): + bytes_to_state_dict(b"") + + +@require_package("grpc") +def test_state_to_bytes_simple_dict(): + from lerobot.common.transport.utils import bytes_to_state_dict, state_to_bytes + + """Test converting simple state dict to bytes.""" + state_dict = { + "layer1.weight": torch.randn(10, 5), + "layer1.bias": torch.randn(10), + "layer2.weight": torch.randn(1, 10), + "layer2.bias": torch.randn(1), + } + + data = state_to_bytes(state_dict) + assert isinstance(data, bytes) + assert len(data) > 0 + + reconstructed = bytes_to_state_dict(data) + + assert len(reconstructed) == len(state_dict) + for key in state_dict: + assert key in reconstructed + assert torch.allclose(state_dict[key], reconstructed[key]) + + +@require_package("grpc") +def test_state_to_bytes_various_dtypes(): + from lerobot.common.transport.utils import bytes_to_state_dict, state_to_bytes + + """Test converting state dict with various tensor dtypes.""" + state_dict = { + "float32": torch.randn(5, 5), + "float64": torch.randn(3, 3).double(), + "int32": 
torch.randint(0, 100, (4, 4), dtype=torch.int32), + "int64": torch.randint(0, 100, (2, 2), dtype=torch.int64), + "bool": torch.tensor([True, False, True]), + "uint8": torch.randint(0, 255, (3, 3), dtype=torch.uint8), + } + + data = state_to_bytes(state_dict) + reconstructed = bytes_to_state_dict(data) + + for key in state_dict: + assert reconstructed[key].dtype == state_dict[key].dtype + if state_dict[key].dtype == torch.bool: + assert torch.equal(state_dict[key], reconstructed[key]) + else: + assert torch.allclose(state_dict[key], reconstructed[key]) + + +@require_package("grpc") +def test_bytes_to_state_dict_invalid_data(): + from lerobot.common.transport.utils import bytes_to_state_dict + + """Test bytes_to_state_dict with invalid data.""" + with pytest.raises(UnpicklingError): + bytes_to_state_dict(b"This is not a valid torch save file") + + +@require_cuda +@require_package("grpc") +def test_state_to_bytes_various_dtypes_cuda(): + from lerobot.common.transport.utils import bytes_to_state_dict, state_to_bytes + + """Test converting state dict with various tensor dtypes.""" + state_dict = { + "float32": torch.randn(5, 5).cuda(), + "float64": torch.randn(3, 3).double().cuda(), + "int32": torch.randint(0, 100, (4, 4), dtype=torch.int32).cuda(), + "int64": torch.randint(0, 100, (2, 2), dtype=torch.int64).cuda(), + "bool": torch.tensor([True, False, True]), + "uint8": torch.randint(0, 255, (3, 3), dtype=torch.uint8), + } + + data = state_to_bytes(state_dict) + reconstructed = bytes_to_state_dict(data) + + for key in state_dict: + assert reconstructed[key].dtype == state_dict[key].dtype + if state_dict[key].dtype == torch.bool: + assert torch.equal(state_dict[key], reconstructed[key]) + else: + assert torch.allclose(state_dict[key], reconstructed[key]) + + +@require_package("grpc") +def test_python_object_to_bytes_none(): + from lerobot.common.transport.utils import bytes_to_python_object, python_object_to_bytes + + """Test converting None to bytes.""" + obj = None + data = python_object_to_bytes(obj) + reconstructed = bytes_to_python_object(data) + assert reconstructed is None + + +@pytest.mark.parametrize( + "obj", + [ + 42, + -123, + 3.14159, + -2.71828, + "Hello, World!", + "Unicode: 你好世界 🌍", + True, + False, + b"byte string", + [], + [1, 2, 3], + [1, "two", 3.0, True, None], + {}, + {"key": "value", "number": 123, "nested": {"a": 1}}, + (), + (1, 2, 3), + ], +) +@require_package("grpc") +def test_python_object_to_bytes_simple_types(obj): + from lerobot.common.transport.utils import bytes_to_python_object, python_object_to_bytes + + """Test converting simple Python types.""" + data = python_object_to_bytes(obj) + reconstructed = bytes_to_python_object(data) + assert reconstructed == obj + assert type(reconstructed) is type(obj) + + +@require_package("grpc") +def test_python_object_to_bytes_with_tensors(): + from lerobot.common.transport.utils import bytes_to_python_object, python_object_to_bytes + + """Test converting objects containing PyTorch tensors.""" + obj = { + "tensor": torch.randn(5, 5), + "list_with_tensor": [1, 2, torch.randn(3, 3), "string"], + "nested": { + "tensor1": torch.randn(2, 2), + "tensor2": torch.tensor([1, 2, 3]), + }, + } + + data = python_object_to_bytes(obj) + reconstructed = bytes_to_python_object(data) + + assert torch.allclose(obj["tensor"], reconstructed["tensor"]) + assert reconstructed["list_with_tensor"][0] == 1 + assert reconstructed["list_with_tensor"][3] == "string" + assert torch.allclose(obj["list_with_tensor"][2], 
reconstructed["list_with_tensor"][2]) + assert torch.allclose(obj["nested"]["tensor1"], reconstructed["nested"]["tensor1"]) + assert torch.equal(obj["nested"]["tensor2"], reconstructed["nested"]["tensor2"]) + + +@require_package("grpc") +def test_transitions_to_bytes_empty_list(): + from lerobot.common.transport.utils import bytes_to_transitions, transitions_to_bytes + + """Test converting empty transitions list.""" + transitions = [] + data = transitions_to_bytes(transitions) + reconstructed = bytes_to_transitions(data) + assert reconstructed == transitions + assert isinstance(reconstructed, list) + + +@require_package("grpc") +def test_transitions_to_bytes_single_transition(): + from lerobot.common.transport.utils import bytes_to_transitions, transitions_to_bytes + + """Test converting a single transition.""" + transition = Transition( + state={"image": torch.randn(3, 64, 64), "state": torch.randn(10)}, + action=torch.randn(5), + reward=torch.tensor(1.5), + done=torch.tensor(False), + next_state={"image": torch.randn(3, 64, 64), "state": torch.randn(10)}, + ) + + transitions = [transition] + data = transitions_to_bytes(transitions) + reconstructed = bytes_to_transitions(data) + + assert len(reconstructed) == 1 + + assert_transitions_equal(transitions[0], reconstructed[0]) + + +@require_package("grpc") +def assert_transitions_equal(t1: Transition, t2: Transition): + """Helper to assert two transitions are equal.""" + assert_observation_equal(t1["state"], t2["state"]) + assert torch.allclose(t1["action"], t2["action"]) + assert torch.allclose(t1["reward"], t2["reward"]) + assert torch.equal(t1["done"], t2["done"]) + assert_observation_equal(t1["next_state"], t2["next_state"]) + + +@require_package("grpc") +def assert_observation_equal(o1: dict, o2: dict): + """Helper to assert two observations are equal.""" + assert set(o1.keys()) == set(o2.keys()) + for key in o1: + assert torch.allclose(o1[key], o2[key]) + + +@require_package("grpc") +def test_transitions_to_bytes_multiple_transitions(): + from lerobot.common.transport.utils import bytes_to_transitions, transitions_to_bytes + + """Test converting multiple transitions.""" + transitions = [] + for i in range(5): + transition = Transition( + state={"data": torch.randn(10)}, + action=torch.randn(3), + reward=torch.tensor(float(i)), + done=torch.tensor(i == 4), + next_state={"data": torch.randn(10)}, + ) + transitions.append(transition) + + data = transitions_to_bytes(transitions) + reconstructed = bytes_to_transitions(data) + + assert len(reconstructed) == len(transitions) + for original, reconstructed_item in zip(transitions, reconstructed, strict=False): + assert_transitions_equal(original, reconstructed_item) + + +@require_package("grpc") +def test_receive_bytes_in_chunks_unknown_state(): + from lerobot.common.transport.utils import receive_bytes_in_chunks + + """Test receive_bytes_in_chunks with an unknown transfer state.""" + + # Mock the gRPC message object, which has `transfer_state` and `data` attributes. 
+ class MockMessage: + def __init__(self, transfer_state, data): + self.transfer_state = transfer_state + self.data = data + + # 10 is not a valid TransferState enum value + bad_iterator = [MockMessage(transfer_state=10, data=b"bad_data")] + output_queue = Queue() + shutdown_event = Event() + + with pytest.raises(ValueError, match="Received unknown transfer state"): + receive_bytes_in_chunks(bad_iterator, output_queue, shutdown_event) diff --git a/tests/utils/test_process.py b/tests/utils/test_process.py new file mode 100644 index 0000000000..054a8593a5 --- /dev/null +++ b/tests/utils/test_process.py @@ -0,0 +1,112 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import multiprocessing +import os +import signal +import threading +from unittest.mock import patch + +import pytest + +from lerobot.common.utils.process import ProcessSignalHandler + + +# Fixture to reset shutdown_event_counter and original signal handlers before and after each test +@pytest.fixture(autouse=True) +def reset_globals_and_handlers(): + # Store original signal handlers + original_handlers = { + sig: signal.getsignal(sig) + for sig in [signal.SIGINT, signal.SIGTERM, signal.SIGHUP, signal.SIGQUIT] + if hasattr(signal, sig.name) + } + + yield + + # Restore original signal handlers + for sig, handler in original_handlers.items(): + signal.signal(sig, handler) + + +def test_setup_process_handlers_event_with_threads(): + """Test that setup_process_handlers returns the correct event type.""" + handler = ProcessSignalHandler(use_threads=True) + shutdown_event = handler.shutdown_event + assert isinstance(shutdown_event, threading.Event), "Should be a threading.Event" + assert not shutdown_event.is_set(), "Event should initially be unset" + + +def test_setup_process_handlers_event_with_processes(): + """Test that setup_process_handlers returns the correct event type.""" + handler = ProcessSignalHandler(use_threads=False) + shutdown_event = handler.shutdown_event + assert isinstance(shutdown_event, type(multiprocessing.Event())), "Should be a multiprocessing.Event" + assert not shutdown_event.is_set(), "Event should initially be unset" + + +@pytest.mark.parametrize("use_threads", [True, False]) +@pytest.mark.parametrize( + "sig", + [ + signal.SIGINT, + signal.SIGTERM, + # SIGHUP and SIGQUIT are not reliably available on all platforms (e.g. 
Windows) + pytest.param( + signal.SIGHUP, + marks=pytest.mark.skipif(not hasattr(signal, "SIGHUP"), reason="SIGHUP not available"), + ), + pytest.param( + signal.SIGQUIT, + marks=pytest.mark.skipif(not hasattr(signal, "SIGQUIT"), reason="SIGQUIT not available"), + ), + ], +) +def test_signal_handler_sets_event(use_threads, sig): + """Test that the signal handler sets the event on receiving a signal.""" + handler = ProcessSignalHandler(use_threads=use_threads) + shutdown_event = handler.shutdown_event + + assert handler.counter == 0 + + os.kill(os.getpid(), sig) + + # In some environments, the signal might take a moment to be handled. + shutdown_event.wait(timeout=1.0) + + assert shutdown_event.is_set(), f"Event should be set after receiving signal {sig}" + + # Ensure the internal counter was incremented + assert handler.counter == 1 + + +@pytest.mark.parametrize("use_threads", [True, False]) +@patch("sys.exit") +def test_force_shutdown_on_second_signal(mock_sys_exit, use_threads): + """Test that a second signal triggers a force shutdown.""" + handler = ProcessSignalHandler(use_threads=use_threads) + + os.kill(os.getpid(), signal.SIGINT) + # Give a moment for the first signal to be processed + import time + + time.sleep(0.1) + os.kill(os.getpid(), signal.SIGINT) + + time.sleep(0.1) + + assert handler.counter == 2 + mock_sys_exit.assert_called_once_with(1) diff --git a/tests/utils/test_queue.py b/tests/utils/test_queue.py new file mode 100644 index 0000000000..863231e82b --- /dev/null +++ b/tests/utils/test_queue.py @@ -0,0 +1,150 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import threading +import time +from queue import Queue + +from lerobot.common.utils.queue import get_last_item_from_queue + + +def test_get_last_item_single_item(): + """Test getting the last item when queue has only one item.""" + queue = Queue() + queue.put("single_item") + + result = get_last_item_from_queue(queue) + + assert result == "single_item" + assert queue.empty() + + +def test_get_last_item_multiple_items(): + """Test getting the last item when queue has multiple items.""" + queue = Queue() + items = ["first", "second", "third", "fourth", "last"] + + for item in items: + queue.put(item) + + result = get_last_item_from_queue(queue) + + assert result == "last" + assert queue.empty() + + +def test_get_last_item_different_types(): + """Test with different data types in the queue.""" + queue = Queue() + items = [1, 2.5, "string", {"key": "value"}, [1, 2, 3], ("tuple", "data")] + + for item in items: + queue.put(item) + + result = get_last_item_from_queue(queue) + + assert result == ("tuple", "data") + assert queue.empty() + + +def test_get_last_item_maxsize_queue(): + """Test with a queue that has a maximum size.""" + queue = Queue(maxsize=5) + + # Fill the queue + for i in range(5): + queue.put(i) + + # Give the queue time to fill + time.sleep(0.1) + + result = get_last_item_from_queue(queue) + + assert result == 4 + assert queue.empty() + + +def test_get_last_item_with_none_values(): + """Test with None values in the queue.""" + queue = Queue() + items = [1, None, 2, None, 3] + + for item in items: + queue.put(item) + + # Give the queue time to fill + time.sleep(0.1) + + result = get_last_item_from_queue(queue) + + assert result == 3 + assert queue.empty() + + +def test_get_last_item_blocking_timeout(): + """Test get_last_item_from_queue returns None on timeout.""" + queue = Queue() + result = get_last_item_from_queue(queue, block=True, timeout=0.1) + assert result is None + + +def test_get_last_item_non_blocking_empty(): + """Test get_last_item_from_queue with block=False on an empty queue returns None.""" + queue = Queue() + result = get_last_item_from_queue(queue, block=False) + assert result is None + + +def test_get_last_item_non_blocking_success(): + """Test get_last_item_from_queue with block=False on a non-empty queue.""" + queue = Queue() + items = ["first", "second", "last"] + for item in items: + queue.put(item) + + # Give the queue time to fill + time.sleep(0.1) + + result = get_last_item_from_queue(queue, block=False) + assert result == "last" + assert queue.empty() + + +def test_get_last_item_blocking_waits_for_item(): + """Test that get_last_item_from_queue waits for an item if block=True.""" + queue = Queue() + result = [] + + def producer(): + queue.put("item1") + queue.put("item2") + + def consumer(): + # This will block until the producer puts the first item + item = get_last_item_from_queue(queue, block=True, timeout=0.2) + result.append(item) + + producer_thread = threading.Thread(target=producer) + consumer_thread = threading.Thread(target=consumer) + + producer_thread.start() + consumer_thread.start() + + producer_thread.join() + consumer_thread.join() + + assert result == ["item2"] + assert queue.empty() diff --git a/tests/utils/test_replay_buffer.py b/tests/utils/test_replay_buffer.py new file mode 100644 index 0000000000..f7a055b20f --- /dev/null +++ b/tests/utils/test_replay_buffer.py @@ -0,0 +1,682 @@ +#!/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +from typing import Callable + +import pytest +import torch + +from lerobot.common.datasets.lerobot_dataset import LeRobotDataset +from lerobot.common.utils.buffer import BatchTransition, ReplayBuffer, random_crop_vectorized +from tests.fixtures.constants import DUMMY_REPO_ID + + +def state_dims() -> list[str]: + return ["observation.image", "observation.state"] + + +@pytest.fixture +def replay_buffer() -> ReplayBuffer: + return create_empty_replay_buffer() + + +def clone_state(state: dict) -> dict: + return {k: v.clone() for k, v in state.items()} + + +def create_empty_replay_buffer( + optimize_memory: bool = False, + use_drq: bool = False, + image_augmentation_function: Callable | None = None, +) -> ReplayBuffer: + buffer_capacity = 10 + device = "cpu" + return ReplayBuffer( + buffer_capacity, + device, + state_dims(), + optimize_memory=optimize_memory, + use_drq=use_drq, + image_augmentation_function=image_augmentation_function, + ) + + +def create_random_image() -> torch.Tensor: + return torch.rand(3, 84, 84) + + +def create_dummy_transition() -> dict: + return { + "observation.image": create_random_image(), + "action": torch.randn(4), + "reward": torch.tensor(1.0), + "observation.state": torch.randn( + 10, + ), + "done": torch.tensor(False), + "truncated": torch.tensor(False), + "complementary_info": {}, + } + + +def create_dataset_from_replay_buffer(tmp_path) -> tuple[LeRobotDataset, ReplayBuffer]: + dummy_state_1 = create_dummy_state() + dummy_action_1 = create_dummy_action() + + dummy_state_2 = create_dummy_state() + dummy_action_2 = create_dummy_action() + + dummy_state_3 = create_dummy_state() + dummy_action_3 = create_dummy_action() + + dummy_state_4 = create_dummy_state() + dummy_action_4 = create_dummy_action() + + replay_buffer = create_empty_replay_buffer() + replay_buffer.add(dummy_state_1, dummy_action_1, 1.0, dummy_state_1, False, False) + replay_buffer.add(dummy_state_2, dummy_action_2, 1.0, dummy_state_2, False, False) + replay_buffer.add(dummy_state_3, dummy_action_3, 1.0, dummy_state_3, True, True) + replay_buffer.add(dummy_state_4, dummy_action_4, 1.0, dummy_state_4, True, True) + + root = tmp_path / "test" + return (replay_buffer.to_lerobot_dataset(DUMMY_REPO_ID, root=root), replay_buffer) + + +def create_dummy_state() -> dict: + return { + "observation.image": create_random_image(), + "observation.state": torch.randn( + 10, + ), + } + + +def get_tensor_memory_consumption(tensor): + return tensor.nelement() * tensor.element_size() + + +def get_tensors_memory_consumption(obj, visited_addresses): + total_size = 0 + + address = id(obj) + if address in visited_addresses: + return 0 + + visited_addresses.add(address) + + if isinstance(obj, torch.Tensor): + return get_tensor_memory_consumption(obj) + elif isinstance(obj, (list, tuple)): + for item in obj: + total_size += get_tensors_memory_consumption(item, visited_addresses) + elif isinstance(obj, dict): + for value in obj.values(): + total_size += 
get_tensors_memory_consumption(value, visited_addresses) + elif hasattr(obj, "__dict__"): + # It's an object, we need to get the size of the attributes + for _, attr in vars(obj).items(): + total_size += get_tensors_memory_consumption(attr, visited_addresses) + + return total_size + + +def get_object_memory(obj): + # Track visited addresses to avoid infinite loops + # and cases when two properties point to the same object + visited_addresses = set() + + # Get the size of the object in bytes + total_size = sys.getsizeof(obj) + + # Get the size of the tensor attributes + total_size += get_tensors_memory_consumption(obj, visited_addresses) + + return total_size + + +def create_dummy_action() -> torch.Tensor: + return torch.randn(4) + + +def dict_properties() -> list: + return ["state", "next_state"] + + +@pytest.fixture +def dummy_state() -> dict: + return create_dummy_state() + + +@pytest.fixture +def next_dummy_state() -> dict: + return create_dummy_state() + + +@pytest.fixture +def dummy_action() -> torch.Tensor: + return torch.randn(4) + + +def test_empty_buffer_sample_raises_error(replay_buffer): + assert len(replay_buffer) == 0, "Replay buffer should be empty." + assert replay_buffer.capacity == 10, "Replay buffer capacity should be 10." + with pytest.raises(RuntimeError, match="Cannot sample from an empty buffer"): + replay_buffer.sample(1) + + +def test_zero_capacity_buffer_raises_error(): + with pytest.raises(ValueError, match="Capacity must be greater than 0."): + ReplayBuffer(0, "cpu", ["observation", "next_observation"]) + + +def test_add_transition(replay_buffer, dummy_state, dummy_action): + replay_buffer.add(dummy_state, dummy_action, 1.0, dummy_state, False, False) + assert len(replay_buffer) == 1, "Replay buffer should have one transition after adding." + assert torch.equal(replay_buffer.actions[0], dummy_action), ( + "Action should be equal to the first transition." + ) + assert replay_buffer.rewards[0] == 1.0, "Reward should be equal to the first transition." + assert not replay_buffer.dones[0], "Done should be False for the first transition." + assert not replay_buffer.truncateds[0], "Truncated should be False for the first transition." + + for dim in state_dims(): + assert torch.equal(replay_buffer.states[dim][0], dummy_state[dim]), ( + "Observation should be equal to the first transition." + ) + assert torch.equal(replay_buffer.next_states[dim][0], dummy_state[dim]), ( + "Next observation should be equal to the first transition." + ) + + +def test_add_over_capacity(): + replay_buffer = ReplayBuffer(2, "cpu", ["observation", "next_observation"]) + dummy_state_1 = create_dummy_state() + dummy_action_1 = create_dummy_action() + + dummy_state_2 = create_dummy_state() + dummy_action_2 = create_dummy_action() + + dummy_state_3 = create_dummy_state() + dummy_action_3 = create_dummy_action() + + replay_buffer.add(dummy_state_1, dummy_action_1, 1.0, dummy_state_1, False, False) + replay_buffer.add(dummy_state_2, dummy_action_2, 1.0, dummy_state_2, False, False) + replay_buffer.add(dummy_state_3, dummy_action_3, 1.0, dummy_state_3, True, True) + + assert len(replay_buffer) == 2, "Replay buffer should have 2 transitions after adding 3." + + for dim in state_dims(): + assert torch.equal(replay_buffer.states[dim][0], dummy_state_3[dim]), ( + "Observation should be equal to the first transition." + ) + assert torch.equal(replay_buffer.next_states[dim][0], dummy_state_3[dim]), ( + "Next observation should be equal to the first transition." 
+ ) + + assert torch.equal(replay_buffer.actions[0], dummy_action_3), ( + "Action should be equal to the last transition." + ) + assert replay_buffer.rewards[0] == 1.0, "Reward should be equal to the last transition." + assert replay_buffer.dones[0], "Done should be True for the first transition." + assert replay_buffer.truncateds[0], "Truncated should be True for the first transition." + + +def test_sample_from_empty_buffer(replay_buffer): + with pytest.raises(RuntimeError, match="Cannot sample from an empty buffer"): + replay_buffer.sample(1) + + +def test_sample_with_1_transition(replay_buffer, dummy_state, next_dummy_state, dummy_action): + replay_buffer.add(dummy_state, dummy_action, 1.0, next_dummy_state, False, False) + got_batch_transition = replay_buffer.sample(1) + + expected_batch_transition = BatchTransition( + state=clone_state(dummy_state), + action=dummy_action.clone(), + reward=1.0, + next_state=clone_state(next_dummy_state), + done=False, + truncated=False, + ) + + for buffer_property in dict_properties(): + for k, v in expected_batch_transition[buffer_property].items(): + got_state = got_batch_transition[buffer_property][k] + + assert got_state.shape[0] == 1, f"{k} should have 1 transition." + assert got_state.device.type == "cpu", f"{k} should be on cpu." + + assert torch.equal(got_state[0], v), f"{k} should be equal to the expected batch transition." + + for key, _value in expected_batch_transition.items(): + if key in dict_properties(): + continue + + got_value = got_batch_transition[key] + + v_tensor = expected_batch_transition[key] + if not isinstance(v_tensor, torch.Tensor): + v_tensor = torch.tensor(v_tensor) + + assert got_value.shape[0] == 1, f"{key} should have 1 transition." + assert got_value.device.type == "cpu", f"{key} should be on cpu." + assert torch.equal(got_value[0], v_tensor), f"{key} should be equal to the expected batch transition." + + +def test_sample_with_batch_bigger_than_buffer_size( + replay_buffer, dummy_state, next_dummy_state, dummy_action +): + replay_buffer.add(dummy_state, dummy_action, 1.0, next_dummy_state, False, False) + got_batch_transition = replay_buffer.sample(10) + + expected_batch_transition = BatchTransition( + state=dummy_state, + action=dummy_action, + reward=1.0, + next_state=next_dummy_state, + done=False, + truncated=False, + ) + + for buffer_property in dict_properties(): + for k in expected_batch_transition[buffer_property]: + got_state = got_batch_transition[buffer_property][k] + + assert got_state.shape[0] == 1, f"{k} should have 1 transition." + + for key in expected_batch_transition: + if key in dict_properties(): + continue + + got_value = got_batch_transition[key] + assert got_value.shape[0] == 1, f"{key} should have 1 transition." 
+ + +def test_sample_batch(replay_buffer): + dummy_state_1 = create_dummy_state() + dummy_action_1 = create_dummy_action() + + dummy_state_2 = create_dummy_state() + dummy_action_2 = create_dummy_action() + + dummy_state_3 = create_dummy_state() + dummy_action_3 = create_dummy_action() + + dummy_state_4 = create_dummy_state() + dummy_action_4 = create_dummy_action() + + replay_buffer.add(dummy_state_1, dummy_action_1, 1.0, dummy_state_1, False, False) + replay_buffer.add(dummy_state_2, dummy_action_2, 2.0, dummy_state_2, False, False) + replay_buffer.add(dummy_state_3, dummy_action_3, 3.0, dummy_state_3, True, True) + replay_buffer.add(dummy_state_4, dummy_action_4, 4.0, dummy_state_4, True, True) + + dummy_states = [dummy_state_1, dummy_state_2, dummy_state_3, dummy_state_4] + dummy_actions = [dummy_action_1, dummy_action_2, dummy_action_3, dummy_action_4] + + got_batch_transition = replay_buffer.sample(3) + + for buffer_property in dict_properties(): + for k in got_batch_transition[buffer_property]: + got_state = got_batch_transition[buffer_property][k] + + assert got_state.shape[0] == 3, f"{k} should have 3 transition." + + for got_state_item in got_state: + assert any(torch.equal(got_state_item, dummy_state[k]) for dummy_state in dummy_states), ( + f"{k} should be equal to one of the dummy states." + ) + + for got_action_item in got_batch_transition["action"]: + assert any(torch.equal(got_action_item, dummy_action) for dummy_action in dummy_actions), ( + "Actions should be equal to the dummy actions." + ) + + for k in got_batch_transition: + if k in dict_properties() or k == "complementary_info": + continue + + got_value = got_batch_transition[k] + assert got_value.shape[0] == 3, f"{k} should have 3 transition." + + +def test_to_lerobot_dataset_with_empty_buffer(replay_buffer): + with pytest.raises(ValueError, match="The replay buffer is empty. 
Cannot convert to a dataset."):
+        replay_buffer.to_lerobot_dataset("dummy_repo")
+
+
+def test_to_lerobot_dataset(tmp_path):
+    ds, buffer = create_dataset_from_replay_buffer(tmp_path)
+
+    assert len(ds) == len(buffer), "Dataset should have the same size as the Replay Buffer"
+    assert ds.fps == 1, "FPS should be 1"
+    assert ds.repo_id == "dummy/repo", "The dataset should have `dummy/repo` repo id"
+
+    for dim in state_dims():
+        assert dim in ds.features
+        assert ds.features[dim]["shape"] == buffer.states[dim][0].shape
+
+    assert ds.num_episodes == 2
+    assert ds.num_frames == 4
+
+    for j, value in enumerate(ds):
+        print(torch.equal(value["observation.image"], buffer.next_states["observation.image"][j]))
+
+    for i in range(len(ds)):
+        for feature, value in ds[i].items():
+            if feature == "action":
+                assert torch.equal(value, buffer.actions[i])
+            elif feature == "next.reward":
+                assert torch.equal(value, buffer.rewards[i])
+            elif feature == "next.done":
+                assert torch.equal(value, buffer.dones[i])
+            elif feature == "observation.image":
+                # Tensor -> numpy conversion is not exact, so we allow a small difference
+                # TODO: Check and fix it
+                torch.testing.assert_close(value, buffer.states["observation.image"][i], rtol=0.3, atol=0.003)
+            elif feature == "observation.state":
+                assert torch.equal(value, buffer.states["observation.state"][i])
+
+
+def test_from_lerobot_dataset(tmp_path):
+    dummy_state_1 = create_dummy_state()
+    dummy_action_1 = create_dummy_action()
+
+    dummy_state_2 = create_dummy_state()
+    dummy_action_2 = create_dummy_action()
+
+    dummy_state_3 = create_dummy_state()
+    dummy_action_3 = create_dummy_action()
+
+    dummy_state_4 = create_dummy_state()
+    dummy_action_4 = create_dummy_action()
+
+    replay_buffer = create_empty_replay_buffer()
+    replay_buffer.add(dummy_state_1, dummy_action_1, 1.0, dummy_state_1, False, False)
+    replay_buffer.add(dummy_state_2, dummy_action_2, 1.0, dummy_state_2, False, False)
+    replay_buffer.add(dummy_state_3, dummy_action_3, 1.0, dummy_state_3, True, True)
+    replay_buffer.add(dummy_state_4, dummy_action_4, 1.0, dummy_state_4, True, True)
+
+    root = tmp_path / "test"
+    ds = replay_buffer.to_lerobot_dataset(DUMMY_REPO_ID, root=root)
+
+    reconverted_buffer = ReplayBuffer.from_lerobot_dataset(
+        ds, state_keys=list(state_dims()), device="cpu", capacity=replay_buffer.capacity, use_drq=False
+    )
+
+    # Check only the part of the buffer that's actually filled with data
+    assert torch.equal(
+        reconverted_buffer.actions[: len(replay_buffer)],
+        replay_buffer.actions[: len(replay_buffer)],
+    ), "Actions from converted buffer should be equal to the original replay buffer."
+    assert torch.equal(
+        reconverted_buffer.rewards[: len(replay_buffer)], replay_buffer.rewards[: len(replay_buffer)]
+    ), "Rewards from converted buffer should be equal to the original replay buffer."
+    assert torch.equal(
+        reconverted_buffer.dones[: len(replay_buffer)], replay_buffer.dones[: len(replay_buffer)]
+    ), "Dones from converted buffer should be equal to the original replay buffer."
+ + # Lerobot DS haven't supported truncateds yet + expected_truncateds = torch.zeros(len(replay_buffer)).bool() + assert torch.equal(reconverted_buffer.truncateds[: len(replay_buffer)], expected_truncateds), ( + "Truncateds from converted buffer should be equal False" + ) + + assert torch.equal( + replay_buffer.states["observation.state"][: len(replay_buffer)], + reconverted_buffer.states["observation.state"][: len(replay_buffer)], + ), "State should be the same after converting to dataset and return back" + + for i in range(4): + torch.testing.assert_close( + replay_buffer.states["observation.image"][i], + reconverted_buffer.states["observation.image"][i], + rtol=0.4, + atol=0.004, + ) + + # The 2, 3 frames have done flag, so their values will be equal to the current state + for i in range(2): + # In the current implementation we take the next state from the `states` and ignore `next_states` + next_index = (i + 1) % 4 + + torch.testing.assert_close( + replay_buffer.states["observation.image"][next_index], + reconverted_buffer.next_states["observation.image"][i], + rtol=0.4, + atol=0.004, + ) + + for i in range(2, 4): + assert torch.equal( + replay_buffer.states["observation.state"][i], + reconverted_buffer.next_states["observation.state"][i], + ) + + +def test_buffer_sample_alignment(): + # Initialize buffer + buffer = ReplayBuffer(capacity=100, device="cpu", state_keys=["state_value"], storage_device="cpu") + + # Fill buffer with patterned data + for i in range(100): + signature = float(i) / 100.0 + state = {"state_value": torch.tensor([[signature]]).float()} + action = torch.tensor([[2.0 * signature]]).float() + reward = 3.0 * signature + + is_end = (i + 1) % 10 == 0 + if is_end: + next_state = {"state_value": torch.tensor([[signature]]).float()} + done = True + else: + next_signature = float(i + 1) / 100.0 + next_state = {"state_value": torch.tensor([[next_signature]]).float()} + done = False + + buffer.add(state, action, reward, next_state, done, False) + + # Sample and verify + batch = buffer.sample(50) + + for i in range(50): + state_sig = batch["state"]["state_value"][i].item() + action_val = batch["action"][i].item() + reward_val = batch["reward"][i].item() + next_state_sig = batch["next_state"]["state_value"][i].item() + is_done = batch["done"][i].item() > 0.5 + + # Verify relationships + assert abs(action_val - 2.0 * state_sig) < 1e-4, ( + f"Action {action_val} should be 2x state signature {state_sig}" + ) + + assert abs(reward_val - 3.0 * state_sig) < 1e-4, ( + f"Reward {reward_val} should be 3x state signature {state_sig}" + ) + + if is_done: + assert abs(next_state_sig - state_sig) < 1e-4, ( + f"For done states, next_state {next_state_sig} should equal state {state_sig}" + ) + else: + # Either it's the next sequential state (+0.01) or same state (for episode boundaries) + valid_next = ( + abs(next_state_sig - state_sig - 0.01) < 1e-4 or abs(next_state_sig - state_sig) < 1e-4 + ) + assert valid_next, ( + f"Next state {next_state_sig} should be either state+0.01 or same as state {state_sig}" + ) + + +def test_memory_optimization(): + dummy_state_1 = create_dummy_state() + dummy_action_1 = create_dummy_action() + + dummy_state_2 = create_dummy_state() + dummy_action_2 = create_dummy_action() + + dummy_state_3 = create_dummy_state() + dummy_action_3 = create_dummy_action() + + dummy_state_4 = create_dummy_state() + dummy_action_4 = create_dummy_action() + + replay_buffer = create_empty_replay_buffer() + replay_buffer.add(dummy_state_1, dummy_action_1, 1.0, dummy_state_2, False, 
False) + replay_buffer.add(dummy_state_2, dummy_action_2, 1.0, dummy_state_3, False, False) + replay_buffer.add(dummy_state_3, dummy_action_3, 1.0, dummy_state_4, False, False) + replay_buffer.add(dummy_state_4, dummy_action_4, 1.0, dummy_state_4, True, True) + + optimized_replay_buffer = create_empty_replay_buffer(True) + optimized_replay_buffer.add(dummy_state_1, dummy_action_1, 1.0, dummy_state_2, False, False) + optimized_replay_buffer.add(dummy_state_2, dummy_action_2, 1.0, dummy_state_3, False, False) + optimized_replay_buffer.add(dummy_state_3, dummy_action_3, 1.0, dummy_state_4, False, False) + optimized_replay_buffer.add(dummy_state_4, dummy_action_4, 1.0, None, True, True) + + assert get_object_memory(optimized_replay_buffer) < get_object_memory(replay_buffer), ( + "Optimized replay buffer should be smaller than the original replay buffer" + ) + + +def test_check_image_augmentations_with_drq_and_dummy_image_augmentation_function(dummy_state, dummy_action): + def dummy_image_augmentation_function(x): + return torch.ones_like(x) * 10 + + replay_buffer = create_empty_replay_buffer( + use_drq=True, image_augmentation_function=dummy_image_augmentation_function + ) + + replay_buffer.add(dummy_state, dummy_action, 1.0, dummy_state, False, False) + + sampled_transitions = replay_buffer.sample(1) + assert torch.all(sampled_transitions["state"]["observation.image"] == 10), ( + "Image augmentations should be applied" + ) + assert torch.all(sampled_transitions["next_state"]["observation.image"] == 10), ( + "Image augmentations should be applied" + ) + + +def test_check_image_augmentations_with_drq_and_default_image_augmentation_function( + dummy_state, dummy_action +): + replay_buffer = create_empty_replay_buffer(use_drq=True) + + replay_buffer.add(dummy_state, dummy_action, 1.0, dummy_state, False, False) + + # Let's check that it doesn't fail and shapes are correct + sampled_transitions = replay_buffer.sample(1) + assert sampled_transitions["state"]["observation.image"].shape == (1, 3, 84, 84) + assert sampled_transitions["next_state"]["observation.image"].shape == (1, 3, 84, 84) + + +def test_random_crop_vectorized_basic(): + # Create a batch of 2 images with known patterns + batch_size, channels, height, width = 2, 3, 10, 8 + images = torch.zeros((batch_size, channels, height, width)) + + # Fill with unique values for testing + for b in range(batch_size): + images[b] = b + 1 + + crop_size = (6, 4) # Smaller than original + cropped = random_crop_vectorized(images, crop_size) + + # Check output shape + assert cropped.shape == (batch_size, channels, *crop_size) + + # Check that values are preserved (should be either 1s or 2s for respective batches) + assert torch.all(cropped[0] == 1) + assert torch.all(cropped[1] == 2) + + +def test_random_crop_vectorized_invalid_size(): + images = torch.zeros((2, 3, 10, 8)) + + # Test crop size larger than image + with pytest.raises(ValueError, match="Requested crop size .* is bigger than the image size"): + random_crop_vectorized(images, (12, 8)) + + with pytest.raises(ValueError, match="Requested crop size .* is bigger than the image size"): + random_crop_vectorized(images, (10, 10)) + + +def _populate_buffer_for_async_test(capacity: int = 10) -> ReplayBuffer: + """Create a small buffer with deterministic 3×128×128 images and 11-D state.""" + buffer = ReplayBuffer( + capacity=capacity, + device="cpu", + state_keys=["observation.image", "observation.state"], + storage_device="cpu", + ) + + for i in range(capacity): + img = torch.ones(3, 128, 128) * i + 
state_vec = torch.arange(11).float() + i + state = { + "observation.image": img, + "observation.state": state_vec, + } + buffer.add( + state=state, + action=torch.tensor([0.0]), + reward=0.0, + next_state=state, + done=False, + truncated=False, + ) + return buffer + + +def test_async_iterator_shapes_basic(): + buffer = _populate_buffer_for_async_test() + batch_size = 2 + iterator = buffer.get_iterator(batch_size=batch_size, async_prefetch=True, queue_size=1) + batch = next(iterator) + + images = batch["state"]["observation.image"] + states = batch["state"]["observation.state"] + + assert images.shape == (batch_size, 3, 128, 128) + assert states.shape == (batch_size, 11) + + next_images = batch["next_state"]["observation.image"] + next_states = batch["next_state"]["observation.state"] + + assert next_images.shape == (batch_size, 3, 128, 128) + assert next_states.shape == (batch_size, 11) + + +def test_async_iterator_multiple_iterations(): + buffer = _populate_buffer_for_async_test() + batch_size = 2 + iterator = buffer.get_iterator(batch_size=batch_size, async_prefetch=True, queue_size=2) + + for _ in range(5): + batch = next(iterator) + images = batch["state"]["observation.image"] + states = batch["state"]["observation.state"] + assert images.shape == (batch_size, 3, 128, 128) + assert states.shape == (batch_size, 11) + + next_images = batch["next_state"]["observation.image"] + next_states = batch["next_state"]["observation.state"] + assert next_images.shape == (batch_size, 3, 128, 128) + assert next_states.shape == (batch_size, 11) + + # Ensure iterator can be disposed without blocking + del iterator From 402f14a93b3908714c1a52578dd58101b7956fa6 Mon Sep 17 00:00:00 2001 From: Dana Aubakirova <118912928+danaaubakirova@users.noreply.github.com> Date: Fri, 13 Jun 2025 14:17:59 +0200 Subject: [PATCH 47/88] fix(docs): SmolVLA fine-tuning getting started (#1201) Co-authored-by: Pepijn <138571049+pkooij@users.noreply.github.com> Co-authored-by: danaaubakirova Co-authored-by: Simon Alibert <75076266+aliberts@users.noreply.github.com> Co-authored-by: Francesco Capuano Co-authored-by: Steven Palma --- docs/source/_toctree.yml | 4 ++ docs/source/smolvla.mdx | 93 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 97 insertions(+) create mode 100644 docs/source/smolvla.mdx diff --git a/docs/source/_toctree.yml b/docs/source/_toctree.yml index 37938358ff..8430368e03 100644 --- a/docs/source/_toctree.yml +++ b/docs/source/_toctree.yml @@ -14,6 +14,10 @@ - local: hilserl_sim title: Train RL in Simulation title: "Tutorials" +- sections: + - local: smolvla + title: Finetune SmolVLA + title: "Policies" - sections: - local: so101 title: SO-101 diff --git a/docs/source/smolvla.mdx b/docs/source/smolvla.mdx new file mode 100644 index 0000000000..58340baa0d --- /dev/null +++ b/docs/source/smolvla.mdx @@ -0,0 +1,93 @@ +# Finetune SmolVLA + +SmolVLA is Hugging Face’s lightweight foundation model for robotics. Designed for easy fine-tuning on LeRobot datasets, it helps accelerate your development! + +

+  [Image: SmolVLA architecture.]
+  Figure 1. SmolVLA takes as input (i) multiple camera views, (ii) the robot’s current sensorimotor state, and (iii) a natural language instruction, encoded into contextual features used to condition the action expert when generating an action chunk.

+
+## Set Up Your Environment
+
+1. Install LeRobot by following our [Installation Guide](./installation).
+2. Install SmolVLA dependencies by running:
+
+   ```bash
+   pip install -e ".[smolvla]"
+   ```
+
+## Collect a dataset
+
+SmolVLA is a base model, so fine-tuning on your own data is required for optimal performance in your setup.
+We recommend recording ~50 episodes of your task as a starting point. Follow our guide to get started: [Recording a Dataset](https://huggingface.co/docs/lerobot/getting_started_real_world_robot#record-a-dataset)
+
+
+
+In your dataset, make sure to have enough demonstrations for each variation you introduce (e.g. the cube position on the table if it is a cube pick-and-place task).
+
+For reference, we recommend checking out the dataset linked below, which was used in the [SmolVLA paper](https://huggingface.co/papers/2506.01844):
+
+🔗 [SVLA SO100 PickPlace](https://huggingface.co/spaces/lerobot/visualize_dataset?path=%2Flerobot%2Fsvla_so100_pickplace%2Fepisode_0)
+
+In this dataset, we recorded 50 episodes across 5 distinct cube positions. For each position, we collected 10 episodes of pick-and-place interactions. This structure, repeating each variation several times, helped the model generalize better. We tried a similar dataset with only 25 episodes, and it was not enough, leading to poor performance: data quality and quantity are key.
+Once your dataset is available on the Hub, you can use our fine-tuning script to adapt SmolVLA to your application.
+
+
+## Finetune SmolVLA on your data
+
+Use [`smolvla_base`](https://hf.co/lerobot/smolvla_base), our pretrained 450M model, and fine-tune it on your data.
+Training the model for 20k steps takes roughly 4 hrs on a single A100 GPU. You should tune the number of steps based on performance and your use case.
+
+If you don't have a GPU, you can train using our notebook on [![Google Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/lerobot/training-smolvla.ipynb)
+
+Pass your dataset to the training script using `--dataset.repo_id`. If you want to test your installation, run the following command, where we use one of the datasets we collected for the [SmolVLA Paper](https://huggingface.co/papers/2506.01844).
+
+```bash
+cd lerobot && python lerobot/scripts/train.py \
+  --policy.path=lerobot/smolvla_base \
+  --dataset.repo_id=${HF_USER}/mydataset \
+  --batch_size=64 \
+  --steps=20000 \
+  --output_dir=outputs/train/my_smolvla \
+  --job_name=my_smolvla_training \
+  --policy.device=cuda \
+  --wandb.enable=true
+```
+
+
+You can start with a small batch size and increase it incrementally, if the GPU allows it, as long as loading times remain short.
+
+
+Fine-tuning is an art. For a complete overview of the options for finetuning, run
+
+```bash
+python lerobot/scripts/train.py --help
+```
+
+

+  [Image: Comparison of SmolVLA across task variations.]
+  Figure 2: Comparison of SmolVLA across task variations. From left to right: (1) pick-place cube counting, (2) pick-place cube counting, (3) pick-place cube counting under perturbations, and (4) generalization on pick-and-place of the lego block with real-world SO101.

+ + +## Evaluate the finetuned model and run it in real-time + +Similarly for when recording an episode, it is recommended that you are logged in to the HuggingFace Hub. You can follow the corresponding steps: [Record a dataset](./getting_started_real_world_robot#record-a-dataset). +Once you are logged in, you can run inference in your setup by doing: + +```bash +python -m lerobot.record \ + --robot.type=so101_follower \ + --robot.port=/dev/ttyACM0 \ # <- Use your port + --robot.id=my_blue_follower_arm \ # <- Use your robot id + --robot.cameras="{ front: {type: opencv, index_or_path: 8, width: 640, height: 480, fps: 30}}" \ # <- Use your cameras + --dataset.single_task="Grasp a lego block and put it in the bin." \ # <- Use the same task description you used in your dataset recording + --dataset.repo_id=${HF_USER}/eval_DATASET_NAME_test \ # <- This will be the dataset name on HF Hub + --dataset.episode_time_s=50 \ + --dataset.num_episodes=10 \ + --policy.path=HF_USER/FINETUNE_MODEL_NAME # <- Use your fine-tuned model +``` + +Depending on your evaluation setup, you can configure the duration and the number of episodes to record for your evaluation suite. From c662f8cf14f21083bc3e0ae058addb77d1d6d1fc Mon Sep 17 00:00:00 2001 From: Steven Palma Date: Fri, 13 Jun 2025 15:29:10 +0200 Subject: [PATCH 48/88] chore(teleop): print calibration path saved (#1286) --- lerobot/common/teleoperators/so100_leader/so100_leader.py | 2 +- lerobot/common/teleoperators/so101_leader/so101_leader.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lerobot/common/teleoperators/so100_leader/so100_leader.py b/lerobot/common/teleoperators/so100_leader/so100_leader.py index 900346ad55..59b083e3fd 100644 --- a/lerobot/common/teleoperators/so100_leader/so100_leader.py +++ b/lerobot/common/teleoperators/so100_leader/so100_leader.py @@ -112,7 +112,7 @@ def calibrate(self) -> None: self.bus.write_calibration(self.calibration) self._save_calibration() - logger.info(f"Calibration saved to {self.calibration_fpath}") + print(f"Calibration saved to {self.calibration_fpath}") def configure(self) -> None: self.bus.disable_torque() diff --git a/lerobot/common/teleoperators/so101_leader/so101_leader.py b/lerobot/common/teleoperators/so101_leader/so101_leader.py index d324e2a888..80ddfbb1d6 100644 --- a/lerobot/common/teleoperators/so101_leader/so101_leader.py +++ b/lerobot/common/teleoperators/so101_leader/so101_leader.py @@ -109,7 +109,7 @@ def calibrate(self) -> None: self.bus.write_calibration(self.calibration) self._save_calibration() - logger.info(f"Calibration saved to {self.calibration_fpath}") + print(f"Calibration saved to {self.calibration_fpath}") def configure(self) -> None: self.bus.disable_torque() From bd4cc25537b835ce882ef53fad9a7ebc95b95947 Mon Sep 17 00:00:00 2001 From: Adil Zouitine Date: Fri, 13 Jun 2025 17:07:11 +0200 Subject: [PATCH 49/88] chore(dependencies): add gamepad support with pygame and hidapi (#1287) --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index 1ebef75bff..31276a18b0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -86,6 +86,7 @@ dora = [ ] dynamixel = ["dynamixel-sdk>=3.7.31"] feetech = ["feetech-servo-sdk>=1.0.0"] +gamepad = ["pygame>=2.5.1", "hidapi>=0.14.0"] intelrealsense = [ "pyrealsense2>=2.55.1.6486 ; sys_platform != 'darwin'", "pyrealsense2-macosx>=2.54 ; sys_platform == 'darwin'", From 0bfc27d17f0f5d519f6f93e6a45ad59315d9cab1 Mon Sep 17 00:00:00 2001 From: Simon Alibert 
<75076266+aliberts@users.noreply.github.com> Date: Fri, 13 Jun 2025 18:23:07 +0200 Subject: [PATCH 50/88] Robot integration tutorial (#1285) --- docs/source/_toctree.yml | 2 + docs/source/integrate_hardware.mdx | 321 +++++++++++++++++++ lerobot/common/robots/robot.py | 87 ++++- lerobot/common/teleoperators/teleoperator.py | 85 ++++- 4 files changed, 483 insertions(+), 12 deletions(-) create mode 100644 docs/source/integrate_hardware.mdx diff --git a/docs/source/_toctree.yml b/docs/source/_toctree.yml index 8430368e03..0e83a1fe0a 100644 --- a/docs/source/_toctree.yml +++ b/docs/source/_toctree.yml @@ -9,6 +9,8 @@ title: Getting Started with Real-World Robots - local: cameras title: Cameras + - local: integrate_hardware + title: Bring Your Own Hardware - local: hilserl title: Train a Robot with RL - local: hilserl_sim diff --git a/docs/source/integrate_hardware.mdx b/docs/source/integrate_hardware.mdx new file mode 100644 index 0000000000..0c29121d86 --- /dev/null +++ b/docs/source/integrate_hardware.mdx @@ -0,0 +1,321 @@ +# Bring Your Own Hardware + +This tutorial will explain how to integrate your own robot design into the LeRobot ecosystem and have it access all of our tools (data collection, control pipelines, policy training and inference). + +To that end, we provide the [`Robot`](https://github.com/huggingface/lerobot/blob/main/lerobot/common/robots/robot.py) base class in the LeRobot which specifies a standard interface for physical robot integration. Let's see how to implement it. + +## Prerequisites + +- Your own robot which exposes a communication interface (e.g. serial, CAN, TCP) +- A way to read sensor data and send motor commands programmatically, e.g. manufacturer's SDK or API, or your own protocol implementation. +- LeRobot installed in your environment. Follow our [Installation Guide](./installation). + +## Choose your motors + +If you're using Feetech or Dynamixel motors, LeRobot provides built-in bus interfaces: + +- [`FeetechMotorsBus`](https://github.com/huggingface/lerobot/blob/main/lerobot/common/motors/feetech/feetech.py) – for controlling Feetech servos +- [`DynamixelMotorsBus`](https://github.com/huggingface/lerobot/blob/main/lerobot/common/motors/dynamixel/dynamixel.py) – for controlling Dynamixel servos + +Please refer to the [`MotorsBus`](https://github.com/huggingface/lerobot/blob/main/lerobot/common/motors/motors_bus.py) abstract class to learn about its API. +For a good example of how it can be used, you can have a look at our own [SO101 follower implementation](https://github.com/huggingface/lerobot/blob/main/lerobot/common/robots/so101_follower/so101_follower.py) + +Use these if compatible! Otherwise, you'll need to find or write a Python interface (not covered in this tutorial): +- Find an existing SDK in Python (or use bindings to C/C++) +- Or implement a basic communication wrapper (e.g., via pyserial, socket, or CANopen) + +You're not alone—many community contributions use custom boards or firmware! 
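+
+If you do end up writing your own interface, the sketch below shows roughly what a minimal serial wrapper could look like. It is not part of LeRobot, and the protocol is entirely hypothetical (a one-byte command followed by packed little-endian float32 joint values over a pyserial link); adapt the framing, commands and joint count to whatever your own board actually speaks.
+
+```python
+import struct
+
+import serial  # pyserial
+
+
+class MySerialMotorsWrapper:
+    """Minimal sketch of a custom motor interface over a serial link (hypothetical protocol)."""
+
+    def __init__(self, port: str, baudrate: int = 1_000_000, n_joints: int = 5):
+        self.n_joints = n_joints
+        self.serial = serial.Serial(port, baudrate, timeout=0.1)
+
+    def read_joint_positions(self) -> list[float]:
+        # Hypothetical request: 'R' asks the board for all joint positions,
+        # returned as n_joints little-endian float32 values.
+        self.serial.write(b"R")
+        payload = self.serial.read(4 * self.n_joints)
+        return list(struct.unpack(f"<{self.n_joints}f", payload))
+
+    def write_joint_positions(self, positions: list[float]) -> None:
+        # Hypothetical command: 'W' followed by n_joints float32 goal positions.
+        self.serial.write(b"W" + struct.pack(f"<{self.n_joints}f", *positions))
+
+    def close(self) -> None:
+        self.serial.close()
+```
+
+A wrapper like this plays the role that `FeetechMotorsBus` plays in the rest of this tutorial: the `Robot` subclass you will write below simply calls into it from its `get_observation()` and `send_action()` methods.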
+
+For Feetech and Dynamixel, we currently support these servos:
+  - Feetech:
+    - STS & SMS series (protocol 0): `sts3215`, `sts3250`, `sm8512bl`
+    - SCS series (protocol 1): `scs0009`
+  - Dynamixel (protocol 2.0 only): `xl330-m077`, `xl330-m288`, `xl430-w250`, `xm430-w350`, `xm540-w270`, `xc430-w150`
+
+If you are using Feetech or Dynamixel servos that are not in this list, you can add them in the [Feetech table](https://github.com/huggingface/lerobot/blob/main/lerobot/common/motors/feetech/tables.py) or [Dynamixel table](https://github.com/huggingface/lerobot/blob/main/lerobot/common/motors/dynamixel/tables.py). Depending on the model, this will require you to add model-specific information. In most cases though, there shouldn't be much to add.
+
+In the next sections, we'll use a `FeetechMotorsBus` as the motor interface for the examples. Replace it and adapt to your motors if necessary.
+
+## Step 1: Subclass the `Robot` Interface
+
+You’ll first need to specify the config class and a string identifier (`name`) for your robot. If your robot has settings that you'd like to be able to change easily (e.g. port/address, baudrate), they should go here.
+
+Here, we'll add the port name and one camera by default for our robot:
+```python
+from dataclasses import dataclass, field
+
+from lerobot.common.cameras import CameraConfig
+from lerobot.common.cameras.opencv import OpenCVCameraConfig
+from lerobot.common.robots import RobotConfig
+
+
+@RobotConfig.register_subclass("my_cool_robot")
+@dataclass
+class MyCoolRobotConfig(RobotConfig):
+    port: str
+    cameras: dict[str, CameraConfig] = field(
+        default_factory=lambda: {
+            "cam_1": OpenCVCameraConfig(
+                index_or_path=2,
+                fps=30,
+                width=480,
+                height=640,
+            ),
+        }
+    )
+```
+
+Have a look at our [Cameras tutorial](./cameras) to understand how to detect and add your camera.
+
+Next, we'll create our actual robot class which inherits from `Robot`. This abstract class defines a contract you must follow for your robot to be usable with the rest of the LeRobot tools.
+
+Here we'll create a simple 5-DoF robot with one camera. It could be a simple arm, but notice that the `Robot` abstract class does not assume anything about your robot's form factor. You can let your imagination run wild when designing new robots!
+
+```python
+from lerobot.common.cameras import make_cameras_from_configs
+from lerobot.common.motors import Motor, MotorNormMode
+from lerobot.common.motors.feetech import FeetechMotorsBus
+from lerobot.common.robots import Robot
+
+class MyCoolRobot(Robot):
+    config_class = MyCoolRobotConfig
+    name = "my_cool_robot"
+
+    def __init__(self, config: MyCoolRobotConfig):
+        super().__init__(config)
+        self.bus = FeetechMotorsBus(
+            port=self.config.port,
+            motors={
+                "joint_1": Motor(1, "sts3250", MotorNormMode.RANGE_M100_100),
+                "joint_2": Motor(2, "sts3215", MotorNormMode.RANGE_M100_100),
+                "joint_3": Motor(3, "sts3215", MotorNormMode.RANGE_M100_100),
+                "joint_4": Motor(4, "sts3215", MotorNormMode.RANGE_M100_100),
+                "joint_5": Motor(5, "sts3215", MotorNormMode.RANGE_M100_100),
+            },
+            calibration=self.calibration,
+        )
+        self.cameras = make_cameras_from_configs(config.cameras)
+```
+
+## Step 2: Define Observation and Action Features
+
+These two properties define the *interface contract* between your robot and tools that consume it (such as data collection or learning pipelines).
+
+> [!WARNING]
+> Note that these properties must be callable even if the robot is not yet connected, so avoid relying on runtime hardware state to define them.
+
+### `observation_features`
+
+This property should return a dictionary describing the structure of sensor outputs from your robot. The keys match what `get_observation()` returns, and the values describe either the shape (for arrays/images) or the type (for simple values).
+
+Example for our 5-DoF arm with one camera:
+```python
+@property
+def _motors_ft(self) -> dict[str, type]:
+    return {
+        "joint_1.pos": float,
+        "joint_2.pos": float,
+        "joint_3.pos": float,
+        "joint_4.pos": float,
+        "joint_5.pos": float,
+    }
+
+@property
+def _cameras_ft(self) -> dict[str, tuple]:
+    return {
+        cam: (self.cameras[cam].height, self.cameras[cam].width, 3) for cam in self.cameras
+    }
+
+@property
+def observation_features(self) -> dict:
+    return {**self._motors_ft, **self._cameras_ft}
+```
+In this case, observations consist of a simple dict storing each motor's position and a camera image.
+
+### `action_features`
+
+This property describes the commands your robot expects via `send_action()`. Again, keys must match the expected input format, and values define the shape/type of each command.
+
+Here, we simply reuse the same joint proprioceptive features (`self._motors_ft`) as in `observation_features`: the action sent will simply be the goal position for each motor.
+```python
+@property
+def action_features(self) -> dict:
+    return self._motors_ft
+```
+
+## Step 3: Handle Connection and Disconnection
+
+These methods should handle opening and closing communication with your hardware (e.g. serial ports, CAN interfaces, USB devices, cameras).
+
+### `is_connected`
+
+This property should simply reflect that communication with the robot's hardware is established. When this property is `True`, it should be possible to read and write to the hardware using `get_observation()` and `send_action()`.
+
+```python
+@property
+def is_connected(self) -> bool:
+    return self.bus.is_connected and all(cam.is_connected for cam in self.cameras.values())
+```
+
+### `connect()`
+
+This method should establish communication with the hardware. Moreover, if your robot needs calibration and is not calibrated, it should start a calibration procedure by default. If your robot needs some specific configuration, it should also be applied here.
+
+```python
+def connect(self, calibrate: bool = True) -> None:
+    self.bus.connect()
+    if not self.is_calibrated and calibrate:
+        self.calibrate()
+
+    for cam in self.cameras.values():
+        cam.connect()
+
+    self.configure()
+```
+
+### `disconnect()`
+
+This method should gracefully terminate communication with the hardware: free any related resources (threads or processes), close ports, etc.
+
+Here, we already handle this in our `MotorsBus` and `Camera` classes, so we just need to call their own `disconnect()` methods:
+```python
+def disconnect(self) -> None:
+    self.bus.disconnect()
+    for cam in self.cameras.values():
+        cam.disconnect()
+```
+
+## Step 4: Support Calibration and Configuration
+
+LeRobot supports saving and loading calibration data automatically. This is useful for joint offsets, zero positions, or sensor alignment.
+
+> Note that depending on your hardware, this may not apply. If that's the case, you can simply leave these methods as no-ops:
+> ```python
+> @property
+> def is_calibrated(self) -> bool:
+>     return True
+>
+> def calibrate(self) -> None:
+>     pass
+> ```
+
+### `is_calibrated`
+
+This should reflect whether your robot has the required calibration loaded.
+
+```python
+@property
+def is_calibrated(self) -> bool:
+    return self.bus.is_calibrated
+```
+
+### `calibrate()`
+
+The goal of the calibration is twofold:
+ - Know the physical range of motion of each motor in order to only send commands within this range.
+ - Normalize raw motor positions to sensible continuous values (e.g. percentages, degrees) instead of arbitrary discrete values that depend on the specific motor used and will not replicate elsewhere.
+
+It should implement the logic for calibration (if relevant) and update the `self.calibration` dictionary. If you are using Feetech or Dynamixel motors, our bus interfaces already include methods to help with this.
+
+```python
+def calibrate(self) -> None:
+    self.bus.disable_torque()
+    for motor in self.bus.motors:
+        self.bus.write("Operating_Mode", motor, OperatingMode.POSITION.value)
+
+    input(f"Move {self} to the middle of its range of motion and press ENTER...")
+    homing_offsets = self.bus.set_half_turn_homings()
+
+    print(
+        "Move all joints sequentially through their entire ranges "
+        "of motion.\nRecording positions. Press ENTER to stop..."
+    )
+    range_mins, range_maxes = self.bus.record_ranges_of_motion()
+
+    self.calibration = {}
+    for motor, m in self.bus.motors.items():
+        self.calibration[motor] = MotorCalibration(
+            id=m.id,
+            drive_mode=0,
+            homing_offset=homing_offsets[motor],
+            range_min=range_mins[motor],
+            range_max=range_maxes[motor],
+        )
+
+    self.bus.write_calibration(self.calibration)
+    self._save_calibration()
+    print("Calibration saved to", self.calibration_fpath)
+```
+
+### `configure()`
+
+Use this to set up any configuration for your hardware (servo control modes, controller gains, etc.). This should usually be run at connection time and be idempotent.
+
+```python
+def configure(self) -> None:
+    with self.bus.torque_disabled():
+        self.bus.configure_motors()
+        for motor in self.bus.motors:
+            self.bus.write("Operating_Mode", motor, OperatingMode.POSITION.value)
+            self.bus.write("P_Coefficient", motor, 16)
+            self.bus.write("I_Coefficient", motor, 0)
+            self.bus.write("D_Coefficient", motor, 32)
+```
+
+## Step 5: Implement Sensor Reading and Action Sending
+
+These are the most important runtime functions: the core I/O loop.
+
+### `get_observation()`
+
+Returns a dictionary of sensor values from the robot. These typically include motor states, camera frames, various sensors, etc. In the LeRobot framework, these observations are what will be fed to a policy in order to predict the actions to take. The dictionary keys and structure must match `observation_features`.
+
+```python
+def get_observation(self) -> dict[str, Any]:
+    if not self.is_connected:
+        raise RuntimeError("Robot is not connected")
+
+    # Read motor positions and rename keys to match `observation_features`
+    obs_dict = self.bus.sync_read("Present_Position")
+    obs_dict = {f"{motor}.pos": val for motor, val in obs_dict.items()}
+
+    # Capture a frame from each camera
+    for cam_key, cam in self.cameras.items():
+        obs_dict[cam_key] = cam.async_read()
+
+    return obs_dict
+```
+
+### `send_action()`
+
+Takes a dictionary that matches `action_features` and sends it to your hardware. You can add safety limits (clipping, smoothing) and return what was actually sent.
+
+```python
+def send_action(self, action: dict[str, Any]) -> dict[str, Any]:
+    if not self.is_connected:
+        raise RuntimeError("Robot is not connected")
+
+    # This is where you could clip or smooth the goal positions before sending them.
+    goal_pos = {key.removesuffix(".pos"): val for key, val in action.items()}
+    self.bus.sync_write("Goal_Position", goal_pos)
+
+    return action
+```
+
+
+
+## Adding a Teleoperator
+
+For implementing teleoperation devices, we also provide a [`Teleoperator`](https://github.com/huggingface/lerobot/blob/main/lerobot/common/teleoperators/teleoperator.py) base class. This class is very similar to the `Robot` base class and also doesn't assume anything about form factor.
+
+The main differences are in the I/O functions: a teleoperator allows you to produce actions via `get_action` and can receive feedback actions via `send_feedback`. Feedback could be anything controllable on the teleoperation device that helps the person controlling it understand the consequences of the actions sent: think of motion/force feedback on a leader arm or vibrations on a gamepad controller, for example. To implement a teleoperator, you can follow this same tutorial and adapt it for these two methods.
+
+## Wrapping Up
+
+Once your robot class is complete, you can leverage the LeRobot ecosystem:
+
+- Control your robot with available teleoperators or directly integrate your own teleoperating device
+- Record training data and visualize it
+- Integrate it into RL or imitation learning pipelines
+
+Don't hesitate to reach out to the community for help on our [Discord](https://discord.gg/s3KuuzsPFb) 🤗
diff --git a/lerobot/common/robots/robot.py b/lerobot/common/robots/robot.py
index e5af9e79f0..ec2b155f35 100644
--- a/lerobot/common/robots/robot.py
+++ b/lerobot/common/robots/robot.py
@@ -27,7 +27,16 @@
 # TODO(aliberts): action/obs typing such as Generic[ObsType, ActType] similar to gym.Env ?
 # https://github.com/Farama-Foundation/Gymnasium/blob/3287c869f9a48d99454306b0d4b4ec537f0f35e3/gymnasium/core.py#L23
 class Robot(abc.ABC):
-    """The main LeRobot class for implementing robots."""
+    """
+    The base abstract class for all LeRobot-compatible robots.
+
+    This class provides a standardized interface for interacting with physical robots.
+    Subclasses must implement all abstract methods and properties to be usable.
+
+    Attributes:
+        config_class (RobotConfig): The expected configuration class for this robot.
+        name (str): The unique robot name used to identify this robot type.
+    """
 
     # Set these in ALL subclasses
     config_class: RobotConfig
@@ -52,58 +61,124 @@ def __str__(self) -> str:
     @property
     @abc.abstractmethod
     def observation_features(self) -> dict:
+        """
+        A dictionary describing the structure and types of the observations produced by the robot.
+        Its structure (keys) should match the structure of what is returned by :pymeth:`get_observation`.
+        Values for the dict should either be:
+            - The type of the value if it's a simple value, e.g. `float` for single proprioceptive value (a joint's position/velocity)
+            - A tuple representing the shape if it's an array-type value, e.g. `(height, width, channel)` for images
+
+        Note: this property should be able to be called regardless of whether the robot is connected or not.
+        """
         pass
 
     @property
     @abc.abstractmethod
     def action_features(self) -> dict:
+        """
+        A dictionary describing the structure and types of the actions expected by the robot. Its structure
+        (keys) should match the structure of what is passed to :pymeth:`send_action`. Values for the dict
+        should be the type of the value if it's a simple value, e.g.
`float` for single proprioceptive value + (a joint's goal position/velocity) + + Note: this property should be able to be called regardless of whether the robot is connected or not. + """ pass @property @abc.abstractmethod def is_connected(self) -> bool: + """ + Whether the robot is currently connected or not. If `False`, calling :pymeth:`get_observation` or + :pymeth:`send_action` should raise an error. + """ pass @abc.abstractmethod def connect(self, calibrate: bool = True) -> None: - """Connects to the robot.""" + """ + Establish communication with the robot. + + Args: + calibrate (bool): If True, automatically calibrate the robot after connecting if it's not + calibrated or needs calibration (this is hardware-dependant). + """ pass @property @abc.abstractmethod def is_calibrated(self) -> bool: + """Whether the robot is currently calibrated or not. Should be always `True` if not applicable""" pass @abc.abstractmethod def calibrate(self) -> None: - """Calibrates the robot.""" + """ + Calibrate the robot if applicable. If not, this should be a no-op. + + This method should collect any necessary data (e.g., motor offsets) and update the + :pyattr:`calibration` dictionary accordingly. + """ pass def _load_calibration(self, fpath: Path | None = None) -> None: + """ + Helper to load calibration data from the specified file. + + Args: + fpath (Path | None): Optional path to the calibration file. Defaults to `self.calibration_fpath`. + """ fpath = self.calibration_fpath if fpath is None else fpath with open(fpath) as f, draccus.config_type("json"): self.calibration = draccus.load(dict[str, MotorCalibration], f) def _save_calibration(self, fpath: Path | None = None) -> None: + """ + Helper to save calibration data to the specified file. + + Args: + fpath (Path | None): Optional path to save the calibration file. Defaults to `self.calibration_fpath`. + """ fpath = self.calibration_fpath if fpath is None else fpath with open(fpath, "w") as f, draccus.config_type("json"): draccus.dump(self.calibration, f, indent=4) @abc.abstractmethod def configure(self) -> None: + """ + Apply any one-time or runtime configuration to the robot. + This may include setting motor parameters, control modes, or initial state. + """ pass @abc.abstractmethod def get_observation(self) -> dict[str, Any]: - """Gets observation from the robot.""" + """ + Retrieve the current observation from the robot. + + Returns: + dict[str, Any]: A flat dictionary representing the robot's current sensory state. Its structure + should match :pymeth:`observation_features`. + """ + pass @abc.abstractmethod def send_action(self, action: dict[str, Any]) -> dict[str, Any]: - """Sends actions to the robot.""" + """ + Send an action command to the robot. + + Args: + action (dict[str, Any]): Dictionary representing the desired action. Its structure should match + :pymeth:`action_features`. + + Returns: + dict[str, Any]: The action actually sent to the motors potentially clipped or modified, e.g. by + safety limits on velocity. 
+ """ pass @abc.abstractmethod def disconnect(self) -> None: - """Disconnects from the robot.""" + """Disconnect from the robot and perform any necessary cleanup.""" pass diff --git a/lerobot/common/teleoperators/teleoperator.py b/lerobot/common/teleoperators/teleoperator.py index d8715a5524..1fef8132a2 100644 --- a/lerobot/common/teleoperators/teleoperator.py +++ b/lerobot/common/teleoperators/teleoperator.py @@ -25,7 +25,16 @@ class Teleoperator(abc.ABC): - """The main LeRobot class for implementing teleoperation devices.""" + """ + The base abstract class for all LeRobot-compatible teleoperation devices. + + This class provides a standardized interface for interacting with physical teleoperators. + Subclasses must implement all abstract methods and properties to be usable. + + Attributes: + config_class (RobotConfig): The expected configuration class for this teleoperator. + name (str): The unique name used to identify this teleoperator type. + """ # Set these in ALL subclasses config_class: TeleoperatorConfig @@ -50,58 +59,122 @@ def __str__(self) -> str: @property @abc.abstractmethod def action_features(self) -> dict: + """ + A dictionary describing the structure and types of the actions produced by the teleoperator. Its + structure (keys) should match the structure of what is returned by :pymeth:`get_action`. Values for + the dict should be the type of the value if it's a simple value, e.g. `float` for single + proprioceptive value (a joint's goal position/velocity) + + Note: this property should be able to be called regardless of whether the robot is connected or not. + """ pass @property @abc.abstractmethod def feedback_features(self) -> dict: + """ + A dictionary describing the structure and types of the feedback actions expected by the robot. Its + structure (keys) should match the structure of what is passed to :pymeth:`send_feedback`. Values for + the dict should be the type of the value if it's a simple value, e.g. `float` for single + proprioceptive value (a joint's goal position/velocity) + + Note: this property should be able to be called regardless of whether the robot is connected or not. + """ pass @property @abc.abstractmethod def is_connected(self) -> bool: + """ + Whether the teleoperator is currently connected or not. If `False`, calling :pymeth:`get_action` + or :pymeth:`send_feedback` should raise an error. + """ pass @abc.abstractmethod def connect(self, calibrate: bool = True) -> None: - """Connects to the teleoperator.""" + """ + Establish communication with the teleoperator. + + Args: + calibrate (bool): If True, automatically calibrate the teleoperator after connecting if it's not + calibrated or needs calibration (this is hardware-dependant). + """ pass @property @abc.abstractmethod def is_calibrated(self) -> bool: + """Whether the teleoperator is currently calibrated or not. Should be always `True` if not applicable""" pass @abc.abstractmethod def calibrate(self) -> None: - """Calibrates the teleoperator.""" + """ + Calibrate the teleoperator if applicable. If not, this should be a no-op. + + This method should collect any necessary data (e.g., motor offsets) and update the + :pyattr:`calibration` dictionary accordingly. + """ pass def _load_calibration(self, fpath: Path | None = None) -> None: + """ + Helper to load calibration data from the specified file. + + Args: + fpath (Path | None): Optional path to the calibration file. Defaults to `self.calibration_fpath`. 
+ """ fpath = self.calibration_fpath if fpath is None else fpath with open(fpath) as f, draccus.config_type("json"): self.calibration = draccus.load(dict[str, MotorCalibration], f) def _save_calibration(self, fpath: Path | None = None) -> None: + """ + Helper to save calibration data to the specified file. + + Args: + fpath (Path | None): Optional path to save the calibration file. Defaults to `self.calibration_fpath`. + """ fpath = self.calibration_fpath if fpath is None else fpath with open(fpath, "w") as f, draccus.config_type("json"): draccus.dump(self.calibration, f, indent=4) @abc.abstractmethod def configure(self) -> None: + """ + Apply any one-time or runtime configuration to the teleoperator. + This may include setting motor parameters, control modes, or initial state. + """ pass @abc.abstractmethod def get_action(self) -> dict[str, Any]: - """Gets the action to send to a teleoperator.""" + """ + Retrieve the current action from the teleoperator. + + Returns: + dict[str, Any]: A flat dictionary representing the teleoperator's current actions. Its + structure should match :pymeth:`observation_features`. + """ pass @abc.abstractmethod def send_feedback(self, feedback: dict[str, Any]) -> None: - """Sends feedback captured from a robot to the teleoperator.""" + """ + Send a feedback action command to the teleoperator. + + Args: + action (dict[str, Any]): Dictionary representing the desired action. Its structure should match + :pymeth:`action_features`. + + Returns: + dict[str, Any]: The action actually sent to the motors potentially clipped or modified, e.g. by + safety limits on velocity. + """ pass @abc.abstractmethod def disconnect(self) -> None: - """Disconnects from the teleoperator.""" + """Disconnect from the teleoperator and perform any necessary cleanup.""" pass From f775326abdb121e3ced1f02515dd6466ce02395d Mon Sep 17 00:00:00 2001 From: Steven Palma Date: Fri, 13 Jun 2025 18:29:19 +0200 Subject: [PATCH 51/88] fix(docs): update send_feedback docstrings --- docs/source/integrate_hardware.mdx | 4 ---- lerobot/common/teleoperators/teleoperator.py | 4 ++-- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/docs/source/integrate_hardware.mdx b/docs/source/integrate_hardware.mdx index 0c29121d86..83db273d34 100644 --- a/docs/source/integrate_hardware.mdx +++ b/docs/source/integrate_hardware.mdx @@ -300,10 +300,6 @@ def send_action(self, action: dict[str, Any]) -> dict[str, Any]: return action ``` - - ## Adding a Teleoperator For implementing teleoperation devices, we also provide a [`Teleoperator`](https://github.com/huggingface/lerobot/blob/main/lerobot/common/teleoperators/teleoperator.py) base class. This class is very similar to the `Robot` base class and also doesn't assume anything on form factor. diff --git a/lerobot/common/teleoperators/teleoperator.py b/lerobot/common/teleoperators/teleoperator.py index 1fef8132a2..a385173120 100644 --- a/lerobot/common/teleoperators/teleoperator.py +++ b/lerobot/common/teleoperators/teleoperator.py @@ -165,8 +165,8 @@ def send_feedback(self, feedback: dict[str, Any]) -> None: Send a feedback action command to the teleoperator. Args: - action (dict[str, Any]): Dictionary representing the desired action. Its structure should match - :pymeth:`action_features`. + feedback (dict[str, Any]): Dictionary representing the desired feedback. Its structure should match + :pymeth:`feedback_features`. Returns: dict[str, Any]: The action actually sent to the motors potentially clipped or modified, e.g. 
by From 88f137de5e007dd9ac1ef4a6d3794ab33b35e747 Mon Sep 17 00:00:00 2001 From: Pepijn <138571049+pkooij@users.noreply.github.com> Date: Fri, 13 Jun 2025 18:48:39 +0200 Subject: [PATCH 52/88] Add sim tutorial, fix lekiwi motor config, add notebook links (#1275) Co-authored-by: AdilZouitine Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Michel Aractingi Co-authored-by: s1lent4gnt Co-authored-by: Michel Aractingi Co-authored-by: Eugene Mironov Co-authored-by: imstevenpmwork Co-authored-by: Simon Alibert <75076266+aliberts@users.noreply.github.com> Co-authored-by: Steven Palma --- docs/source/_toctree.yml | 10 +- ...ted_real_world_robot.mdx => il_robots.mdx} | 5 +- docs/source/il_sim.mdx | 152 ++++++++++++++++++ docs/source/installation.mdx | 2 + docs/source/notebooks.mdx | 29 ++++ .../v2/batch_convert_dataset_v1_to_v2.py | 2 +- lerobot/common/robots/lekiwi/config_lekiwi.py | 2 +- lerobot/common/robots/lekiwi/lekiwi.mdx | 60 +++++++ lerobot/scripts/rl/eval_policy.py | 74 +++++++++ 9 files changed, 331 insertions(+), 5 deletions(-) rename docs/source/{getting_started_real_world_robot.mdx => il_robots.mdx} (98%) create mode 100644 docs/source/il_sim.mdx create mode 100644 docs/source/notebooks.mdx create mode 100644 lerobot/scripts/rl/eval_policy.py diff --git a/docs/source/_toctree.yml b/docs/source/_toctree.yml index 0e83a1fe0a..ea80e82577 100644 --- a/docs/source/_toctree.yml +++ b/docs/source/_toctree.yml @@ -5,8 +5,10 @@ title: Installation title: Get started - sections: - - local: getting_started_real_world_robot - title: Getting Started with Real-World Robots + - local: il_robots + title: Imitation Learning for Robots + - local: il_sim + title: Imitation Learning in Sim - local: cameras title: Cameras - local: integrate_hardware @@ -30,6 +32,10 @@ - local: lekiwi title: LeKiwi title: "Robots" +- sections: + - local: notebooks + title: Notebooks + title: "Resources" - sections: - local: contributing title: Contribute to LeRobot diff --git a/docs/source/getting_started_real_world_robot.mdx b/docs/source/il_robots.mdx similarity index 98% rename from docs/source/getting_started_real_world_robot.mdx rename to docs/source/il_robots.mdx index 19392d4c89..d13e431c85 100644 --- a/docs/source/getting_started_real_world_robot.mdx +++ b/docs/source/il_robots.mdx @@ -1,4 +1,4 @@ -# Getting Started with Real-World Robots +# Imitation Learning on Real-World Robots This tutorial will explain how to train a neural network to control a real robot autonomously. @@ -273,6 +273,9 @@ python lerobot/scripts/train.py \ --resume=true ``` +#### Train using Collab +If your local computer doesn't have a powerful GPU you could utilize Google Collab to train your model by following the [ACT training notebook](./notebooks#training-act). + #### Upload policy checkpoints Once training is done, upload the latest checkpoint with: diff --git a/docs/source/il_sim.mdx b/docs/source/il_sim.mdx new file mode 100644 index 0000000000..625b2fc00d --- /dev/null +++ b/docs/source/il_sim.mdx @@ -0,0 +1,152 @@ +# Imitation Learning in Sim + +This tutorial will explain how to train a neural network to control a robot in simulation with imitation learning. + +**You'll learn:** +1. How to record a dataset in simulation with [gym-hil](https://github.com/huggingface/gym-hil) and visualize the dataset. +2. How to train a policy using your data. +3. How to evaluate your policy in simulation and visualize the results. 
+
+For the simulation environment we use the same [repo](https://github.com/huggingface/gym-hil) that is also being used by the Human-In-the-Loop (HIL) reinforcement learning algorithm.
+This environment is based on [MuJoCo](https://mujoco.org) and allows you to record datasets in LeRobotDataset format.
+Teleoperation is easiest with a controller like the Logitech F710, but you can also use your keyboard if you are up for the challenge.
+
+## Installation
+
+First, install the `gym_hil` package within the LeRobot environment: go to your LeRobot folder and run this command:
+
+```bash
+pip install -e ".[hilserl]"
+```
+
+## Teleoperate and Record a Dataset
+
+To use `gym_hil` with LeRobot, you need to use a configuration file. An example config file can be found [here](https://huggingface.co/datasets/aractingi/lerobot-example-config-files/blob/main/env_config_gym_hil_il.json).
+
+To teleoperate and collect a dataset, we need to modify this config file: add your `repo_id` (e.g. `"repo_id": "il_gym",`), set `"num_episodes": 30,` and make sure `"mode"` is set to `"record"`.
+
+If you do not have an Nvidia GPU, also change the `"device": "cuda"` parameter in the config file (for example to `mps` on macOS).
+
+By default the config file assumes you use a controller. To use your keyboard, please change the environment specified at `"task"` in the config file and set it to `"PandaPickCubeKeyboard-v0"`.
+
+Then we can run one of these commands to start (use `python` on Linux, `mjpython` on macOS):
+
+```bash
+python lerobot/scripts/rl/gym_manipulator.py --config_path path/to/env_config_gym_hil_il.json
+```
+
+```bash
+mjpython lerobot/scripts/rl/gym_manipulator.py --config_path path/to/env_config_gym_hil_il.json
+```
+
+Once rendered, you can teleoperate the robot with the gamepad or keyboard; below you can find the gamepad/keyboard controls.
+
+Note that to teleoperate the robot you have to hold the "Human Take Over Pause Policy" Button `RB` to enable control!
+
+**Gamepad Controls**
+

+  Figure shows the control mappings on a Logitech gamepad.
+
+*Gamepad button mapping for robot control and episode management*

+ +**Keyboard controls** + +For keyboard controls use the `spacebar` to enable control and the following keys to move the robot: +```bash + Arrow keys: Move in X-Y plane + Shift and Shift_R: Move in Z axis + Right Ctrl and Left Ctrl: Open and close gripper + ESC: Exit +``` + +## Visualize a dataset + +If you uploaded your dataset to the hub you can [visualize your dataset online](https://huggingface.co/spaces/lerobot/visualize_dataset) by copy pasting your repo id. + +

+  Figure shows the dataset visualizer.
+
+*Dataset visualizer*

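+You can also inspect the recorded data programmatically. Below is a minimal sketch (the repo id is a placeholder; use the `repo_id` you set in the config):
+
+```python
+from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
+
+# Load the dataset recorded above, downloading it from the Hub if needed.
+dataset = LeRobotDataset("your-hf-user/il_gym")
+
+print(dataset.meta)  # features, fps, number of episodes and frames
+frame = dataset[0]   # a single frame: dict of observation/action tensors
+```
+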
+## Train a policy
+
+To train a policy to control your robot, use the [`python lerobot/scripts/train.py`](../lerobot/scripts/train.py) script. A few arguments are required. Here is an example command:
+```bash
+python lerobot/scripts/train.py \
+  --dataset.repo_id=${HF_USER}/il_gym \
+  --policy.type=act \
+  --output_dir=outputs/train/il_sim_test \
+  --job_name=il_sim_test \
+  --policy.device=cuda \
+  --wandb.enable=true
+```
+
+Let's explain the command:
+1. We provided the dataset as argument with `--dataset.repo_id=${HF_USER}/il_gym`.
+2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](../lerobot/common/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor states, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset.
+3. We provided `policy.device=cuda` since we are training on an Nvidia GPU, but you could use `policy.device=mps` to train on Apple silicon.
+4. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`.
+
+Training should take several hours; 100k steps (which is the default) will take about 1h on an Nvidia A100. You will find checkpoints in `outputs/train/il_sim_test/checkpoints`.
+
+#### Train using Colab
+If your local computer doesn't have a powerful GPU, you could use Google Colab to train your model by following the [ACT training notebook](./notebooks#training-act).
+
+#### Upload policy checkpoints
+
+Once training is done, upload the latest checkpoint with:
+```bash
+huggingface-cli upload ${HF_USER}/il_sim_test \
+  outputs/train/il_sim_test/checkpoints/last/pretrained_model
+```
+
+You can also upload intermediate checkpoints with:
+```bash
+CKPT=010000
+huggingface-cli upload ${HF_USER}/il_sim_test${CKPT} \
+  outputs/train/il_sim_test/checkpoints/${CKPT}/pretrained_model
+```
+
+## Evaluate your policy in Sim
+
+To evaluate your policy, we have to use the config file that can be found [here](https://huggingface.co/datasets/aractingi/lerobot-example-config-files/blob/main/eval_config_gym_hil.json).
+
+Make sure to replace the `repo_id` with the dataset you trained on, for example `pepijn223/il_sim_dataset`, and replace the `pretrained_policy_name_or_path` with your model id, for example `pepijn223/il_sim_model`.
+
+Then you can run one of these commands to visualize your trained policy (use `python` on Linux, `mjpython` on macOS):
+
+```bash
+python lerobot/scripts/rl/eval_policy.py --config_path=path/to/eval_config_gym_hil.json
+```
+
+```bash
+mjpython lerobot/scripts/rl/eval_policy.py --config_path=path/to/eval_config_gym_hil.json
+```
+
+> [!WARNING]
+> While the main workflow of training ACT in simulation is straightforward, there is significant room for exploring how to set up the task, define the initial state of the environment, and determine the type of data required during collection to learn the most effective policy. If your trained policy doesn't perform well, investigate the quality of the dataset it was trained on using our visualizers, as well as the action values and various hyperparameters related to ACT and the simulation.
+
+Congrats 🎉, you have finished this tutorial.
If you want to continue with using LeRobot in simulation follow this [Tutorial on reinforcement learning in sim with HIL-SERL](https://huggingface.co/docs/lerobot/hilserl_sim) + +> [!TIP] +> If you have any questions or need help, please reach out on [Discord](https://discord.com/invite/s3KuuzsPFb). diff --git a/docs/source/installation.mdx b/docs/source/installation.mdx index acb2a7a59d..51474d8f7a 100644 --- a/docs/source/installation.mdx +++ b/docs/source/installation.mdx @@ -68,3 +68,5 @@ To use [Weights and Biases](https://docs.wandb.ai/quickstart) for experiment tra ```bash wandb login ``` + +You can now assemble your robot if it's not ready yet, look for your robot type on the left. Then follow the link below to use Lerobot with your robot. diff --git a/docs/source/notebooks.mdx b/docs/source/notebooks.mdx new file mode 100644 index 0000000000..729b31a99d --- /dev/null +++ b/docs/source/notebooks.mdx @@ -0,0 +1,29 @@ +# 🤗 LeRobot Notebooks + +This repository contains example notebooks for using LeRobot. These notebooks demonstrate how to train policies on real or simulation datasets using standardized policies. + +--- + +### Training ACT + +[ACT](https://huggingface.co/papers/2304.13705) (Action Chunking Transformer) is a transformer-based policy architecture for imitation learning that processes robot states and camera inputs to generate smooth, chunked action sequences. + +We provide a ready-to-run Google Colab notebook to help you train ACT policies using datasets from the Hugging Face Hub, with optional logging to Weights & Biases. + +| Notebook | Colab | +|:---------|:------| +| [Train ACT with LeRobot](https://github.com/huggingface/notebooks/blob/main/lerobot/training-act.ipynb) | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/lerobot/training-act.ipynb) | + +Expected training time for 100k steps: ~1.5 hours on an NVIDIA A100 GPU with batch size of `64`. + +### Training SmolVLA + +[SmolVLA](https://huggingface.co/papers/2506.01844) is a small but efficient Vision-Language-Action model. It is compact in size with 450 M-parameter and is developed by Hugging Face. + +We provide a ready-to-run Google Colab notebook to help you train SmolVLA policies using datasets from the Hugging Face Hub, with optional logging to Weights & Biases. + +| Notebook | Colab | +| :-------------------------------------------------------------------------------------------------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| [Train SmolVLA with LeRobot](https://github.com/huggingface/notebooks/blob/main/lerobot/training-smolvla.ipynb) | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/lerobot/training-smolvla.ipynb) | + +Expected training time for 20k steps: ~5 hours on an NVIDIA A100 GPU with batch size of `64`. 
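+
+Once a notebook has finished training and pushed the checkpoint to the Hub, you can load it locally for evaluation. A minimal sketch for an ACT checkpoint (the repo id below is a placeholder for your own uploaded model):
+
+```python
+from lerobot.common.policies.act.modeling_act import ACTPolicy
+
+# Download the trained weights from the Hub and put the policy in eval mode.
+policy = ACTPolicy.from_pretrained("your-hf-user/my_trained_act_policy")
+policy.eval()
+```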
diff --git a/lerobot/common/datasets/v2/batch_convert_dataset_v1_to_v2.py b/lerobot/common/datasets/v2/batch_convert_dataset_v1_to_v2.py index c31d2da0a5..9b21cf7ca4 100644 --- a/lerobot/common/datasets/v2/batch_convert_dataset_v1_to_v2.py +++ b/lerobot/common/datasets/v2/batch_convert_dataset_v1_to_v2.py @@ -63,7 +63,7 @@ PUSHT_INFO = { "license": "mit", "url": "https://diffusion-policy.cs.columbia.edu/", - "paper": "https://huggingface.co/papers/2303.04137v5", + "paper": "https://huggingface.co/papers/2303.04137", "citation_bibtex": dedent(r""" @article{chi2024diffusionpolicy, author = {Cheng Chi and Zhenjia Xu and Siyuan Feng and Eric Cousineau and Yilun Du and Benjamin Burchfiel and Russ Tedrake and Shuran Song}, diff --git a/lerobot/common/robots/lekiwi/config_lekiwi.py b/lerobot/common/robots/lekiwi/config_lekiwi.py index 4bb5e4dc36..022d09cdd6 100644 --- a/lerobot/common/robots/lekiwi/config_lekiwi.py +++ b/lerobot/common/robots/lekiwi/config_lekiwi.py @@ -34,7 +34,7 @@ def lekiwi_cameras_config() -> dict[str, CameraConfig]: @RobotConfig.register_subclass("lekiwi") @dataclass class LeKiwiConfig(RobotConfig): - port = "/dev/ttyACM0" # port to connect to the bus + port: str = "/dev/ttyACM0" # port to connect to the bus disable_torque_on_disconnect: bool = True diff --git a/lerobot/common/robots/lekiwi/lekiwi.mdx b/lerobot/common/robots/lekiwi/lekiwi.mdx index 68082d8a22..dd39a90399 100644 --- a/lerobot/common/robots/lekiwi/lekiwi.mdx +++ b/lerobot/common/robots/lekiwi/lekiwi.mdx @@ -43,9 +43,69 @@ First, we will assemble the two SO100/SO101 arms. One to attach to the mobile ba - [Assemble SO101](./so101#step-by-step-assembly-instructions) - [Assemble LeKiwi](https://github.com/SIGRobotics-UIUC/LeKiwi/blob/main/Assembly.md) +### Find the USB ports associated with motor board + +To find the port for each bus servo adapter, run this script: +```bash +python lerobot/find_port.py +``` + + + + +Example output: + +``` +Finding all available ports for the MotorBus. +['/dev/tty.usbmodem575E0032081'] +Remove the USB cable from your MotorsBus and press Enter when done. + +[...Disconnect corresponding leader or follower arm and press Enter...] + +The port of this MotorsBus is /dev/tty.usbmodem575E0032081 +Reconnect the USB cable. +``` + +Where the found port is: `/dev/tty.usbmodem575E0032081` corresponding to your board. + + + + +On Linux, you might need to give access to the USB ports by running: +```bash +sudo chmod 666 /dev/ttyACM0 +sudo chmod 666 /dev/ttyACM1 +``` + +Example output: + +``` +Finding all available ports for the MotorBus. +['/dev/ttyACM0'] +Remove the usb cable from your MotorsBus and press Enter when done. + +[...Disconnect corresponding leader or follower arm and press Enter...] + +The port of this MotorsBus is /dev/ttyACM0 +Reconnect the USB cable. +``` + +Where the found port is: `/dev/ttyACM0` corresponding to your board. + + + + ### Configure motors The instructions for configuring the motors can be found in the SO101 [docs](./so101#configure-the-motors). Besides the ids for the arm motors, we also need to set the motor ids for the mobile base. These need to be in a specific order to work. Below an image of the motor ids and motor mounting positions for the mobile base. Note that we only use one Motor Control board on LeKiwi. This means the motor ids for the wheels are 7, 8 and 9. +You can run this command to setup motors for LeKiwi. 
It will first setup the motors for arm (id 6..1) and then setup motors for wheels (9,8,7) + +```bash +python -m lerobot.setup_motors \ + --robot.type=lekiwi \ + --robot.port=/dev/tty.usbmodem58760431551 # <- paste here the port found at previous step +``` + Motor ID's for mobile robot ### Troubleshoot communication diff --git a/lerobot/scripts/rl/eval_policy.py b/lerobot/scripts/rl/eval_policy.py new file mode 100644 index 0000000000..3762719bf4 --- /dev/null +++ b/lerobot/scripts/rl/eval_policy.py @@ -0,0 +1,74 @@ +# !/usr/bin/env python + +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import logging + +from lerobot.common.cameras import opencv # noqa: F401 +from lerobot.common.datasets.lerobot_dataset import LeRobotDataset +from lerobot.common.policies.factory import make_policy +from lerobot.common.robots import ( # noqa: F401 + RobotConfig, + make_robot_from_config, + so100_follower, +) +from lerobot.common.teleoperators import ( + gamepad, # noqa: F401 + so101_leader, # noqa: F401 +) +from lerobot.configs import parser +from lerobot.configs.train import TrainRLServerPipelineConfig +from lerobot.scripts.rl.gym_manipulator import make_robot_env + +logging.basicConfig(level=logging.INFO) + + +def eval_policy(env, policy, n_episodes): + sum_reward_episode = [] + for _ in range(n_episodes): + obs, _ = env.reset() + episode_reward = 0.0 + while True: + action = policy.select_action(obs) + obs, reward, terminated, truncated, _ = env.step(action) + episode_reward += reward + if terminated or truncated: + break + sum_reward_episode.append(episode_reward) + + logging.info(f"Success after 20 steps {sum_reward_episode}") + logging.info(f"success rate {sum(sum_reward_episode) / len(sum_reward_episode)}") + + +@parser.wrap() +def main(cfg: TrainRLServerPipelineConfig): + env_cfg = cfg.env + env = make_robot_env(env_cfg) + dataset_cfg = cfg.dataset + dataset = LeRobotDataset(repo_id=dataset_cfg.repo_id) + dataset_meta = dataset.meta + + policy = make_policy( + cfg=cfg.policy, + # env_cfg=cfg.env, + ds_meta=dataset_meta, + ) + policy.from_pretrained(env_cfg.pretrained_policy_name_or_path) + policy.eval() + + eval_policy(env, policy=policy, n_episodes=10) + + +if __name__ == "__main__": + main() From 814e48f04408505e2d292eb3c92b8a3328a9a062 Mon Sep 17 00:00:00 2001 From: Simon Alibert <75076266+aliberts@users.noreply.github.com> Date: Sat, 14 Jun 2025 01:47:22 +0200 Subject: [PATCH 53/88] Fixes on robot integration tutorial (#1290) --- docs/source/integrate_hardware.mdx | 33 +++++++++++++++--------------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/docs/source/integrate_hardware.mdx b/docs/source/integrate_hardware.mdx index 83db273d34..f7de1cece8 100644 --- a/docs/source/integrate_hardware.mdx +++ b/docs/source/integrate_hardware.mdx @@ -20,7 +20,7 @@ If you're using Feetech or Dynamixel motors, LeRobot provides built-in bus inter Please refer to the 
[`MotorsBus`](https://github.com/huggingface/lerobot/blob/main/lerobot/common/motors/motors_bus.py) abstract class to learn about its API. For a good example of how it can be used, you can have a look at our own [SO101 follower implementation](https://github.com/huggingface/lerobot/blob/main/lerobot/common/robots/so101_follower/so101_follower.py) -Use these if compatible! Otherwise, you'll need to find or write a Python interface (not covered in this tutorial): +Use these if compatible. Otherwise, you'll need to find or write a Python interface (not covered in this tutorial): - Find an existing SDK in Python (or use bindings to C/C++) - Or implement a basic communication wrapper (e.g., via pyserial, socket, or CANopen) @@ -32,7 +32,7 @@ For Feetech and Dynamixel, we currently support these servos: - SCS series (protocol 1): `scs0009` - Dynamixel (protocol 2.0 only): `xl330-m077`, `xl330-m288`, `xl430-w250`, `xm430-w350`, `xm540-w270`, `xc430-w150` -If you are using Feetech or Dynamixel servos that are not in this list, you can add those in the [Feetech table](https://github.com/huggingface/lerobot/blob/main/lerobot/common/motors/feetech/tables.py) or [Dynamixel table](https://github.com/huggingface/lerobot/blob/main/lerobot/common/motors/dynamixel/tables.py). Depending on the model, this will require you to add model-specific information. In most cases though, there should be a lot of additions to do. +If you are using Feetech or Dynamixel servos that are not in this list, you can add those in the [Feetech table](https://github.com/huggingface/lerobot/blob/main/lerobot/common/motors/feetech/tables.py) or [Dynamixel table](https://github.com/huggingface/lerobot/blob/main/lerobot/common/motors/dynamixel/tables.py). Depending on the model, this will require you to add model-specific information. In most cases though, there shouldn't be a lot of additions to do. In the next sections, we'll use a `FeetechMotorsBus` as the motors interface for the examples. Replace it and adapt to your motors if necessary. @@ -158,7 +158,7 @@ def is_connected(self) -> bool: ### `connect()` -This method should establish communication with the hardware. Moreover, if your robot needs calibration is not calibrated, it should start a calibration procedure by default. If your robot needs some specific configuration, this should also be called here. +This method should establish communication with the hardware. Moreover, if your robot needs calibration and is not calibrated, it should start a calibration procedure by default. If your robot needs some specific configuration, this should also be called here. ```python def connect(self, calibrate: bool = True) -> None: @@ -272,30 +272,31 @@ Returns a dictionary of sensor values from the robot. 
These typically include mo ```python def get_observation(self) -> dict[str, Any]: if not self.is_connected: - raise RuntimeError("Robot is not connected") + raise ConnectionError(f"{self} is not connected.") - joint_pos = self.motor_interface.read_joint_positions() - gripper = self.motor_interface.read_gripper_state() - image = self.camera.get_frame() + # Read arm position + obs_dict = self.bus.sync_read("Present_Position") + obs_dict = {f"{motor}.pos": val for motor, val in obs_dict.items()} - return { - "joint_positions": joint_pos, - "gripper_open": gripper, - "camera_image": image, - } + # Capture images from cameras + for cam_key, cam in self.cameras.items(): + obs_dict[cam_key] = cam.async_read() + + return obs_dict ``` ### `send_action()` Takes a dictionary that matches `action_features`, and sends it to your hardware. You can add safety limits (clipping, smoothing) and return what was actually sent. +For simplicity, we won't be adding any modification of the actions in our example here. + ```python def send_action(self, action: dict[str, Any]) -> dict[str, Any]: - if not self.is_connected: - raise RuntimeError("Robot is not connected") + goal_pos = {key.removesuffix(".pos"): val for key, val in action.items()} - self.motor_interface.set_joint_positions(action["joint_position_goals"]) - self.motor_interface.set_gripper(action["gripper_command"]) + # Send goal position to the arm + self.bus.sync_write("Goal_Position", goal_pos) return action ``` From c54e9d4bf7141b70eae9d2a60a84e422330942b5 Mon Sep 17 00:00:00 2001 From: Michel Aractingi Date: Sat, 14 Jun 2025 09:10:09 +0200 Subject: [PATCH 54/88] Add keyboard teleop device to control the end effector robot (#1289) --- .../common/teleoperators/keyboard/__init__.py | 11 +- .../keyboard/configuration_keyboard.py | 6 + .../teleoperators/keyboard/teleop_keyboard.py | 92 +++++++++++++- lerobot/common/teleoperators/utils.py | 4 + lerobot/scripts/rl/gym_manipulator.py | 117 ++++++++++++++++-- 5 files changed, 215 insertions(+), 15 deletions(-) diff --git a/lerobot/common/teleoperators/keyboard/__init__.py b/lerobot/common/teleoperators/keyboard/__init__.py index 9d27a34d6b..5761bf788d 100644 --- a/lerobot/common/teleoperators/keyboard/__init__.py +++ b/lerobot/common/teleoperators/keyboard/__init__.py @@ -1,4 +1,9 @@ -from .configuration_keyboard import KeyboardTeleopConfig -from .teleop_keyboard import KeyboardTeleop +from .configuration_keyboard import KeyboardEndEffectorTeleopConfig, KeyboardTeleopConfig +from .teleop_keyboard import KeyboardEndEffectorTeleop, KeyboardTeleop -__all__ = ["KeyboardTeleopConfig", "KeyboardTeleop"] +__all__ = [ + "KeyboardTeleopConfig", + "KeyboardTeleop", + "KeyboardEndEffectorTeleopConfig", + "KeyboardEndEffectorTeleop", +] diff --git a/lerobot/common/teleoperators/keyboard/configuration_keyboard.py b/lerobot/common/teleoperators/keyboard/configuration_keyboard.py index ce6c9206e9..5d5ef364f7 100644 --- a/lerobot/common/teleoperators/keyboard/configuration_keyboard.py +++ b/lerobot/common/teleoperators/keyboard/configuration_keyboard.py @@ -24,3 +24,9 @@ class KeyboardTeleopConfig(TeleoperatorConfig): # TODO(Steven): Consider setting in here the keys that we want to capture/listen mock: bool = False + + +@TeleoperatorConfig.register_subclass("keyboard_ee") +@dataclass +class KeyboardEndEffectorTeleopConfig(KeyboardTeleopConfig): + use_gripper: bool = True diff --git a/lerobot/common/teleoperators/keyboard/teleop_keyboard.py b/lerobot/common/teleoperators/keyboard/teleop_keyboard.py index 
a72710e9d6..bd3ab903ef 100644 --- a/lerobot/common/teleoperators/keyboard/teleop_keyboard.py +++ b/lerobot/common/teleoperators/keyboard/teleop_keyboard.py @@ -24,7 +24,7 @@ from lerobot.common.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError from ..teleoperator import Teleoperator -from .configuration_keyboard import KeyboardTeleopConfig +from .configuration_keyboard import KeyboardEndEffectorTeleopConfig, KeyboardTeleopConfig PYNPUT_AVAILABLE = True try: @@ -145,3 +145,93 @@ def disconnect(self) -> None: ) if self.listener is not None: self.listener.stop() + + +class KeyboardEndEffectorTeleop(KeyboardTeleop): + """ + Teleop class to use keyboard inputs for end effector control. + Designed to be used with the `So100FollowerEndEffector` robot. + """ + + config_class = KeyboardEndEffectorTeleopConfig + name = "keyboard_ee" + + def __init__(self, config: KeyboardEndEffectorTeleopConfig): + super().__init__(config) + self.config = config + self.misc_keys_queue = Queue() + + @property + def action_features(self) -> dict: + if self.config.use_gripper: + return { + "dtype": "float32", + "shape": (4,), + "names": {"delta_x": 0, "delta_y": 1, "delta_z": 2, "gripper": 3}, + } + else: + return { + "dtype": "float32", + "shape": (3,), + "names": {"delta_x": 0, "delta_y": 1, "delta_z": 2}, + } + + def _on_press(self, key): + if hasattr(key, "char"): + key = key.char + self.event_queue.put((key, True)) + + def _on_release(self, key): + if hasattr(key, "char"): + key = key.char + self.event_queue.put((key, False)) + + def get_action(self) -> dict[str, Any]: + if not self.is_connected: + raise DeviceNotConnectedError( + "KeyboardTeleop is not connected. You need to run `connect()` before `get_action()`." + ) + + self._drain_pressed_keys() + delta_x = 0.0 + delta_y = 0.0 + delta_z = 0.0 + + # Generate action based on current key states + for key, val in self.current_pressed.items(): + if key == keyboard.Key.up: + delta_x = int(val) + elif key == keyboard.Key.down: + delta_x = -int(val) + elif key == keyboard.Key.left: + delta_y = int(val) + elif key == keyboard.Key.right: + delta_y = -int(val) + elif key == keyboard.Key.shift: + delta_z = -int(val) + elif key == keyboard.Key.shift_r: + delta_z = int(val) + elif key == keyboard.Key.ctrl_r: + # Gripper actions are expected to be between 0 (close), 1 (stay), 2 (open) + gripper_action = int(val) + 1 + elif key == keyboard.Key.ctrl_l: + gripper_action = int(val) - 1 + elif val: + # If the key is pressed, add it to the misc_keys_queue + # this will record key presses that are not part of the delta_x, delta_y, delta_z + # this is useful for retrieving other events like interventions for RL, episode success, etc. 
+ self.misc_keys_queue.put(key) + + self.current_pressed.clear() + + action_dict = { + "delta_x": delta_x, + "delta_y": delta_y, + "delta_z": delta_z, + } + + gripper_action = 1 # default gripper action is to stay + if self.config.use_gripper: + action_dict["gripper"] = gripper_action + + return action_dict diff --git a/lerobot/common/teleoperators/utils.py b/lerobot/common/teleoperators/utils.py index d7b7bcf0e6..b49addc15f 100644 --- a/lerobot/common/teleoperators/utils.py +++ b/lerobot/common/teleoperators/utils.py @@ -49,5 +49,9 @@ def make_teleoperator_from_config(config: TeleoperatorConfig) -> Teleoperator: from .gamepad.teleop_gamepad import GamepadTeleop return GamepadTeleop(config) + elif config.type == "keyboard_ee": + from .keyboard.teleop_keyboard import KeyboardEndEffectorTeleop + + return KeyboardEndEffectorTeleop(config) else: raise ValueError(config.type) diff --git a/lerobot/scripts/rl/gym_manipulator.py b/lerobot/scripts/rl/gym_manipulator.py index 98445e6668..3d2a627778 100644 --- a/lerobot/scripts/rl/gym_manipulator.py +++ b/lerobot/scripts/rl/gym_manipulator.py @@ -58,10 +58,12 @@ ) from lerobot.common.teleoperators import ( gamepad, # noqa: F401 + keyboard, # noqa: F401 make_teleoperator_from_config, so101_leader, # noqa: F401 ) from lerobot.common.teleoperators.gamepad.teleop_gamepad import GamepadTeleop +from lerobot.common.teleoperators.keyboard.teleop_keyboard import KeyboardEndEffectorTeleop from lerobot.common.utils.robot_utils import busy_wait from lerobot.common.utils.utils import log_say from lerobot.configs import parser @@ -1191,7 +1193,7 @@ def _init_keyboard_events(self): "rerecord_episode": False, } - def _handle_key_press(self, key, keyboard): + def _handle_key_press(self, key, keyboard_device): """ Handle key press events. @@ -1202,10 +1204,10 @@ def _handle_key_press(self, key, keyboard): This method should be overridden in subclasses for additional key handling. """ try: - if key == keyboard.Key.esc: + if key == keyboard_device.Key.esc: self.keyboard_events["episode_end"] = True return - if key == keyboard.Key.left: + if key == keyboard_device.Key.left: self.keyboard_events["rerecord_episode"] = True return if hasattr(key, "char") and key.char == "s": @@ -1221,13 +1223,13 @@ def _init_keyboard_listener(self): This method sets up keyboard event handling if not in headless mode. """ - from pynput import keyboard + from pynput import keyboard as keyboard_device def on_press(key): with self.event_lock: - self._handle_key_press(key, keyboard) + self._handle_key_press(key, keyboard_device) - self.listener = keyboard.Listener(on_press=on_press) + self.listener = keyboard_device.Listener(on_press=on_press) self.listener.start() def _check_intervention(self): @@ -1403,7 +1405,7 @@ def _init_keyboard_events(self): super()._init_keyboard_events() self.keyboard_events["human_intervention_step"] = False - def _handle_key_press(self, key, keyboard): + def _handle_key_press(self, key, keyboard_device): """ Handle key presses including space for intervention toggle. @@ -1413,8 +1415,8 @@ def _handle_key_press(self, key, keyboard): Extends the base handler to respond to space key for toggling intervention. """ - super()._handle_key_press(key, keyboard) - if key == keyboard.Key.space: + super()._handle_key_press(key, keyboard_device) + if key == keyboard_device.Key.space: if not self.keyboard_events["human_intervention_step"]: logging.info( "Space key pressed. 
Human intervention required.\n" @@ -1574,7 +1576,7 @@ def __init__( print(" Y/Triangle button: End episode (SUCCESS)") print(" B/Circle button: Exit program") - def get_gamepad_action( + def get_teleop_commands( self, ) -> tuple[bool, np.ndarray, bool, bool, bool]: """ @@ -1643,7 +1645,7 @@ def step(self, action): terminate_episode, success, rerecord_episode, - ) = self.get_gamepad_action() + ) = self.get_teleop_commands() # Update episode ending state if requested if terminate_episode: @@ -1700,6 +1702,90 @@ def close(self): return self.env.close() +class KeyboardControlWrapper(GamepadControlWrapper): + """ + Wrapper that allows controlling a gym environment with a keyboard. + + This wrapper intercepts the step method and allows human input via keyboard + to override the agent's actions when desired. + + Inherits from GamepadControlWrapper to avoid code duplication. + """ + + def __init__( + self, + env, + teleop_device, # Accepts an instantiated teleoperator + use_gripper=False, # This should align with teleop_device's config + auto_reset=False, + ): + """ + Initialize the gamepad controller wrapper. + + Args: + env: The environment to wrap. + teleop_device: The instantiated teleoperation device (e.g., GamepadTeleop). + use_gripper: Whether to include gripper control (should match teleop_device.config.use_gripper). + auto_reset: Whether to auto reset the environment when episode ends. + """ + super().__init__(env, teleop_device, use_gripper, auto_reset) + + self.is_intervention_active = False + + logging.info("Keyboard control wrapper initialized with provided teleop_device.") + print("Keyboard controls:") + print(" Arrow keys: Move in X-Y plane") + print(" Shift and Shift_R: Move in Z axis") + print(" Right Ctrl and Left Ctrl: Open and close gripper") + print(" f: End episode with FAILURE") + print(" s: End episode with SUCCESS") + print(" r: End episode with RERECORD") + print(" i: Start/Stop Intervention") + + def get_teleop_commands( + self, + ) -> tuple[bool, np.ndarray, bool, bool, bool]: + action_dict = self.teleop_device.get_action() + episode_end_status = None + + # Unroll the misc_keys_queue to check for events related to intervention, episode success, etc. + while not self.teleop_device.misc_keys_queue.empty(): + key = self.teleop_device.misc_keys_queue.get() + if key == "i": + self.is_intervention_active = not self.is_intervention_active + elif key == "f": + episode_end_status = "failure" + elif key == "s": + episode_end_status = "success" + elif key == "r": + episode_end_status = "rerecord_episode" + + terminate_episode = episode_end_status is not None + success = episode_end_status == "success" + rerecord_episode = episode_end_status == "rerecord_episode" + + # Convert action_dict to numpy array based on expected structure + # Order: delta_x, delta_y, delta_z, gripper (if use_gripper) + action_list = [action_dict["delta_x"], action_dict["delta_y"], action_dict["delta_z"]] + if self.use_gripper: + # GamepadTeleop returns gripper action as 0 (close), 1 (stay), 2 (open) + # This needs to be consistent with what EEActionWrapper expects if it's used downstream + # EEActionWrapper for gripper typically expects 0.0 (closed) to 2.0 (open) + # For now, we pass the direct value from GamepadTeleop, ensure downstream compatibility. 
+ gripper_val = action_dict.get("gripper", 1.0) # Default to 1.0 (stay) if not present + action_list.append(float(gripper_val)) + + gamepad_action_np = np.array(action_list, dtype=np.float32) + + return ( + self.is_intervention_active, + gamepad_action_np, + terminate_episode, + success, + rerecord_episode, + ) + + class GymHilDeviceWrapper(gym.Wrapper): def __init__(self, env, device="cpu"): super().__init__(env) @@ -1843,6 +1929,15 @@ def make_robot_env(cfg: EnvConfig) -> gym.Env: teleop_device=teleop_device, use_gripper=cfg.wrapper.use_gripper, ) + elif control_mode == "keyboard_ee": + assert isinstance(teleop_device, KeyboardEndEffectorTeleop), ( + "teleop_device must be an instance of KeyboardEndEffectorTeleop for keyboard control mode" + ) + env = KeyboardControlWrapper( + env=env, + teleop_device=teleop_device, + use_gripper=cfg.wrapper.use_gripper, + ) elif control_mode == "leader": env = GearedLeaderControlWrapper( env=env, From 3d920f7c41f279cddb6e09a9f4c316ded52ae083 Mon Sep 17 00:00:00 2001 From: tidely <43219534+tidely@users.noreply.github.com> Date: Sat, 14 Jun 2025 15:06:22 +0300 Subject: [PATCH 55/88] Improve type hints (#1293) --- lerobot/common/robots/robot.py | 4 ++-- lerobot/common/teleoperators/teleoperator.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/lerobot/common/robots/robot.py b/lerobot/common/robots/robot.py index ec2b155f35..76c57faf42 100644 --- a/lerobot/common/robots/robot.py +++ b/lerobot/common/robots/robot.py @@ -14,7 +14,7 @@ import abc from pathlib import Path -from typing import Any +from typing import Any, Type import draccus @@ -39,7 +39,7 @@ class Robot(abc.ABC): """ # Set these in ALL subclasses - config_class: RobotConfig + config_class: Type[RobotConfig] name: str def __init__(self, config: RobotConfig): diff --git a/lerobot/common/teleoperators/teleoperator.py b/lerobot/common/teleoperators/teleoperator.py index a385173120..6a20a3a8a7 100644 --- a/lerobot/common/teleoperators/teleoperator.py +++ b/lerobot/common/teleoperators/teleoperator.py @@ -14,7 +14,7 @@ import abc from pathlib import Path -from typing import Any +from typing import Any, Type import draccus @@ -37,7 +37,7 @@ class Teleoperator(abc.ABC): """ # Set these in ALL subclasses - config_class: TeleoperatorConfig + config_class: Type[TeleoperatorConfig] name: str def __init__(self, config: TeleoperatorConfig): From 67d016bdf8a885bfda8a1676daf9c8442bc5cecf Mon Sep 17 00:00:00 2001 From: Steven Palma Date: Sat, 14 Jun 2025 14:23:07 +0200 Subject: [PATCH 56/88] fix(record): no teleop arg in reset environment (#1294) --- docs/source/il_robots.mdx | 4 ++++ docs/source/smolvla.mdx | 4 ++++ lerobot/record.py | 33 ++++++++++++++++++++++----------- 3 files changed, 30 insertions(+), 11 deletions(-) diff --git a/docs/source/il_robots.mdx b/docs/source/il_robots.mdx index d13e431c85..ddfcd98d07 100644 --- a/docs/source/il_robots.mdx +++ b/docs/source/il_robots.mdx @@ -303,6 +303,10 @@ python -m lerobot.record \ --display_data=false \ --dataset.repo_id=$HF_USER/eval_so100 \ --dataset.single_task="Put lego brick into the transparent box" \ + # <- Teleop optional if you want to teleoperate in between episodes \ + # --teleop.type=so100_leader \ + # --teleop.port=/dev/ttyACM0 \ + # --teleop.id=my_awesome_leader_arm \ --policy.path=${HF_USER}/my_policy ``` diff --git a/docs/source/smolvla.mdx b/docs/source/smolvla.mdx index 58340baa0d..1d6596f652 100644 --- a/docs/source/smolvla.mdx +++ b/docs/source/smolvla.mdx @@ -87,6 +87,10 @@ python -m lerobot.record \ 
--dataset.repo_id=${HF_USER}/eval_DATASET_NAME_test \ # <- This will be the dataset name on HF Hub --dataset.episode_time_s=50 \ --dataset.num_episodes=10 \ + # <- Teleop optional if you want to teleoperate in between episodes \ + # --teleop.type=so100_leader \ + # --teleop.port=/dev/ttyACM0 \ + # --teleop.id=my_red_leader_arm \ --policy.path=HF_USER/FINETUNE_MODEL_NAME # <- Use your fine-tuned model ``` diff --git a/lerobot/record.py b/lerobot/record.py index 884a3fcd6d..acc844ff90 100644 --- a/lerobot/record.py +++ b/lerobot/record.py @@ -23,12 +23,15 @@ --robot.port=/dev/tty.usbmodem58760431541 \ --robot.cameras="{laptop: {type: opencv, camera_index: 0, width: 640, height: 480}}" \ --robot.id=black \ - --teleop.type=so100_leader \ - --teleop.port=/dev/tty.usbmodem58760431551 \ - --teleop.id=blue \ --dataset.repo_id=aliberts/record-test \ --dataset.num_episodes=2 \ - --dataset.single_task="Grab the cube" + --dataset.single_task="Grab the cube" \ + # <- Teleop optional if you want to teleoperate to record or in between episodes with a policy \ + # --teleop.type=so100_leader \ + # --teleop.port=/dev/tty.usbmodem58760431551 \ + # --teleop.id=blue \ + # <- Policy optional if you want to record with a policy \ + # --policy.path=${HF_USER}/my_policy \ ``` """ @@ -139,9 +142,6 @@ class RecordConfig: resume: bool = False def __post_init__(self): - if self.teleop is not None and self.policy is not None: - raise ValueError("Choose either a policy or a teleoperator to control the robot") - # HACK: We parse again the cli args here to get the pretrained path if there was one. policy_path = parser.get_path_arg("policy") if policy_path: @@ -149,6 +149,9 @@ def __post_init__(self): self.policy = PreTrainedConfig.from_pretrained(policy_path, cli_overrides=cli_overrides) self.policy.pretrained_path = policy_path + if self.teleop is None and self.policy is None: + raise ValueError("Choose a policy, a teleoperator or both to control the robot") + @classmethod def __get_path_fields__(cls) -> list[str]: """This enables the parser to load config from the policy using `--policy.path=local/dir`""" @@ -179,6 +182,10 @@ def record_loop( while timestamp < control_time_s: start_loop_t = time.perf_counter() + if events["exit_early"]: + events["exit_early"] = False + break + observation = robot.get_observation() if policy is not None or dataset is not None: @@ -194,8 +201,15 @@ def record_loop( robot_type=robot.robot_type, ) action = {key: action_values[i].item() for i, key in enumerate(robot.action_features)} - else: + elif policy is None and teleop is not None: action = teleop.get_action() + else: + logging.info( + "No policy or teleoperator provided, skipping action generation." + "This is likely to happen when resetting the environment without a teleop device." + "The robot won't be at its rest position at the start of the next episode." + ) + continue # Action can eventually be clipped using `max_relative_target`, # so action actually sent is saved in the dataset. 
@@ -220,9 +234,6 @@ def record_loop( busy_wait(1 / fps - dt_s) timestamp = time.perf_counter() - start_episode_t - if events["exit_early"]: - events["exit_early"] = False - break @parser.wrap() From 27e47fe46931e0a550addda3d8980ad028e408e5 Mon Sep 17 00:00:00 2001 From: Michel Aractingi Date: Sat, 14 Jun 2025 15:30:19 +0200 Subject: [PATCH 57/88] `learner.py` import so101_leader instead of so100 (#1295) Co-authored-by: Adil Zouitine --- lerobot/scripts/rl/learner.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lerobot/scripts/rl/learner.py b/lerobot/scripts/rl/learner.py index 2d2c3755a9..663dbe9185 100644 --- a/lerobot/scripts/rl/learner.py +++ b/lerobot/scripts/rl/learner.py @@ -71,7 +71,7 @@ from lerobot.common.policies.factory import make_policy from lerobot.common.policies.sac.modeling_sac import SACPolicy from lerobot.common.robots import so100_follower # noqa: F401 -from lerobot.common.teleoperators import gamepad, so100_leader # noqa: F401 +from lerobot.common.teleoperators import gamepad, so101_leader # noqa: F401 from lerobot.common.transport import services_pb2_grpc from lerobot.common.transport.utils import ( bytes_to_python_object, From 39334480c55abba6540509be53d7b2f0a3a79af7 Mon Sep 17 00:00:00 2001 From: Francesco Capuano <74058581+fracapuano@users.noreply.github.com> Date: Sat, 14 Jun 2025 19:25:50 +0200 Subject: [PATCH 58/88] Fixing `PI0` Policy (#1297) --- lerobot/common/policies/pi0/paligemma_with_expert.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lerobot/common/policies/pi0/paligemma_with_expert.py b/lerobot/common/policies/pi0/paligemma_with_expert.py index 49c844c7bf..fb5077fb2c 100644 --- a/lerobot/common/policies/pi0/paligemma_with_expert.py +++ b/lerobot/common/policies/pi0/paligemma_with_expert.py @@ -223,7 +223,7 @@ def embed_image(self, image: torch.Tensor): return self.paligemma.model.get_image_features(image) def embed_language_tokens(self, tokens: torch.Tensor): - return self.paligemma.language_model.model.embed_tokens(tokens) + return self.paligemma.language_model.embed_tokens(tokens) # TODO: break down this huge forward into modules or functions def forward( @@ -235,7 +235,7 @@ def forward( use_cache: Optional[bool] = None, fill_kv_cache: Optional[bool] = None, ): - models = [self.paligemma.language_model.model, self.gemma_expert.model] + models = [self.paligemma.language_model, self.gemma_expert.model] for hidden_states in inputs_embeds: # TODO this is very inefficient From 2c83f2e9b098752c073dc621a74acce168969534 Mon Sep 17 00:00:00 2001 From: Michel Aractingi Date: Sat, 14 Jun 2025 20:53:40 +0200 Subject: [PATCH 59/88] `gym_manipulator.py` Remove None value action_intervention of BaseLeaderTeleoperator (#1299) --- lerobot/scripts/rl/gym_manipulator.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lerobot/scripts/rl/gym_manipulator.py b/lerobot/scripts/rl/gym_manipulator.py index 3d2a627778..e7327d96dd 100644 --- a/lerobot/scripts/rl/gym_manipulator.py +++ b/lerobot/scripts/rl/gym_manipulator.py @@ -1343,7 +1343,7 @@ def step(self, action): # Add intervention info info["is_intervention"] = is_intervention - info["action_intervention"] = action if is_intervention else None + info["action_intervention"] = action self.prev_leader_gripper = np.clip( self.robot_leader.bus.sync_read("Present_Position")["gripper"], From c27735a0bbbedebc2f7cd8e72c3eabe5fb70ffe7 Mon Sep 17 00:00:00 2001 From: David <17435126+DavidLMS@users.noreply.github.com> Date: Sat, 14 Jun 2025 23:38:10 +0200 Subject: 
[PATCH 60/88] (chore): incorrect resume parameter in recording documentation (#1301) --- docs/source/il_robots.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/il_robots.mdx b/docs/source/il_robots.mdx index ddfcd98d07..60b27d01b2 100644 --- a/docs/source/il_robots.mdx +++ b/docs/source/il_robots.mdx @@ -190,7 +190,7 @@ The `record` function provides a suite of tools for capturing and managing data ##### 2. Checkpointing and Resuming - Checkpoints are automatically created during recording. -- If an issue occurs, you can resume by re-running the same command with `--control.resume=true`. +- If an issue occurs, you can resume by re-running the same command with `--resume=true`. - To start recording from scratch, **manually delete** the dataset directory. ##### 3. Recording Parameters From ee63451335f28748b607040172b5733c81b7a991 Mon Sep 17 00:00:00 2001 From: koenvanwijk Date: Sat, 14 Jun 2025 23:41:45 +0200 Subject: [PATCH 61/88] Update lekiwi.mdx (#1229) --- lerobot/common/robots/lekiwi/lekiwi.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lerobot/common/robots/lekiwi/lekiwi.mdx b/lerobot/common/robots/lekiwi/lekiwi.mdx index dd39a90399..6eaebce799 100644 --- a/lerobot/common/robots/lekiwi/lekiwi.mdx +++ b/lerobot/common/robots/lekiwi/lekiwi.mdx @@ -197,10 +197,10 @@ leader.disconnect() To teleoperate, SSH into your Raspberry Pi, and run `conda activate lerobot` and this command: ```bash -python -m lerobot.common.robots.lekiwi.lekiwi_host +python -m lerobot.common.robots.lekiwi.lekiwi_host --robot.id=my_awesome_kiwi ``` -Then on your laptop, also run `conda activate lerobot` and run the API example, make sure you set the correct `remote_ip` and `port`. +Then on your laptop, also run `conda activate lerobot` and run the API example, make sure you set the correct `remote_ip` and `port` in `examples/lekiwi/teleoperate.py`. 
```bash python examples/lekiwi/teleoperate.py From 16ce5e7d5f9c1249fec9230a42edce16a2b3b8a3 Mon Sep 17 00:00:00 2001 From: Francesco Capuano <74058581+fracapuano@users.noreply.github.com> Date: Sun, 15 Jun 2025 08:57:08 +0200 Subject: [PATCH 62/88] bump `pi0` and `hil` transformers version (#1298) --- pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 31276a18b0..5bff0fca6d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -91,7 +91,7 @@ intelrealsense = [ "pyrealsense2>=2.55.1.6486 ; sys_platform != 'darwin'", "pyrealsense2-macosx>=2.54 ; sys_platform == 'darwin'", ] -pi0 = ["transformers>=4.48.0"] +pi0 = ["transformers>=4.50.3"] smolvla = ["transformers>=4.50.3", "num2words>=0.5.14", "accelerate>=1.7.0", "safetensors>=0.4.3"] pusht = ["gym-pusht>=0.1.5 ; python_version < '4.0'"] stretch = [ @@ -100,7 +100,7 @@ stretch = [ "pyrealsense2>=2.55.1.6486 ; sys_platform != 'darwin'" ] test = ["pytest>=8.1.0", "pytest-timeout>=2.4.0", "pytest-cov>=5.0.0", "pyserial>=3.5", "mock-serial>=0.0.1 ; sys_platform != 'win32'"] -hilserl = ["transformers>=4.48", "gym-hil>=0.1.8", "protobuf>=5.29.3", "grpcio==1.71.0"] +hilserl = ["transformers>=4.50.3", "gym-hil>=0.1.8", "protobuf>=5.29.3", "grpcio==1.71.0"] umi = ["imagecodecs>=2024.1.1"] video_benchmark = ["scikit-image>=0.23.2", "pandas>=2.2.2"] xarm = ["gym-xarm>=0.1.1 ; python_version < '4.0'"] From 04d46e5b803f05ed29ec785afced9a59d8241f39 Mon Sep 17 00:00:00 2001 From: Steven Palma Date: Sun, 15 Jun 2025 11:47:48 +0200 Subject: [PATCH 63/88] docs: fix imitation learning robots docs command (#1308) --- docs/source/il_robots.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/il_robots.mdx b/docs/source/il_robots.mdx index 60b27d01b2..f3b4b1a25f 100644 --- a/docs/source/il_robots.mdx +++ b/docs/source/il_robots.mdx @@ -301,7 +301,7 @@ python -m lerobot.record \ --robot.cameras="{ up: {type: opencv, index_or_path: /dev/video10, width: 640, height: 480, fps: 30}, side: {type: intelrealsense, serial_number_or_name: 233522074606, width: 640, height: 480, fps: 30}}" \ --robot.id=my_awesome_follower_arm \ --display_data=false \ - --dataset.repo_id=$HF_USER/eval_so100 \ + --dataset.repo_id=${HF_USER}/eval_so100 \ --dataset.single_task="Put lego brick into the transparent box" \ # <- Teleop optional if you want to teleoperate in between episodes \ # --teleop.type=so100_leader \ From e46cccb841e37b837e1a05e1f54c39948539aaee Mon Sep 17 00:00:00 2001 From: Steven Palma Date: Thu, 19 Jun 2025 17:07:13 +0200 Subject: [PATCH 64/88] fix(benchmarks): remove .numpy() from frame in benchmark script (#1354) --- benchmarks/video/capture_camera_feed.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) mode change 100644 => 100755 benchmarks/video/capture_camera_feed.py diff --git a/benchmarks/video/capture_camera_feed.py b/benchmarks/video/capture_camera_feed.py old mode 100644 new mode 100755 index ce248f20b5..8f8530532d --- a/benchmarks/video/capture_camera_feed.py +++ b/benchmarks/video/capture_camera_feed.py @@ -55,7 +55,7 @@ def display_and_save_video_stream(output_dir: Path, fps: int, width: int, height if not ret: print("Error: Could not read frame.") break - rr.log("video/stream", rr.Image(frame.numpy()), static=True) + rr.log("video/stream", rr.Image(frame), static=True) cv2.imwrite(str(capture_dir / f"frame_{frame_index:06d}.png"), frame) frame_index += 1 From fee9422b13861158392d78f857a9619345c242ab Mon Sep 17 00:00:00 2001 From: Francesco Capuano Date: 
Sat, 21 Jun 2025 10:56:27 +0200 Subject: [PATCH 65/88] add smolvla to the supported policies to run tests (: --- lerobot/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lerobot/__init__.py b/lerobot/__init__.py index 11114da0ae..0dfdffb9be 100644 --- a/lerobot/__init__.py +++ b/lerobot/__init__.py @@ -168,7 +168,7 @@ ) # lists all available policies from `lerobot/common/policies` -available_policies = ["act", "diffusion", "tdmpc", "vqbet"] +available_policies = ["act", "diffusion", "tdmpc", "vqbet", "smolvla"] # lists all available robots from `lerobot/common/robot_devices/robots` available_robots = [ From 1317a999e2f098dc853375bbb7c9619d15e7011f Mon Sep 17 00:00:00 2001 From: Francesco Capuano Date: Sat, 21 Jun 2025 10:56:49 +0200 Subject: [PATCH 66/88] add: chunk-level access for the policy --- lerobot/common/policies/act/modeling_act.py | 9 ++--- .../policies/smolvla/modeling_smolvla.py | 37 +++++++++++-------- 2 files changed, 25 insertions(+), 21 deletions(-) diff --git a/lerobot/common/policies/act/modeling_act.py b/lerobot/common/policies/act/modeling_act.py index bbbc21b6e5..48de5b32d2 100644 --- a/lerobot/common/policies/act/modeling_act.py +++ b/lerobot/common/policies/act/modeling_act.py @@ -132,18 +132,15 @@ def select_action(self, batch: dict[str, Tensor]) -> Tensor: # Action queue logic for n_action_steps > 1. When the action_queue is depleted, populate it by # querying the policy. if len(self._action_queue) == 0: - actions = self.model(batch)[0][:, : self.config.n_action_steps] - - # TODO(rcadene): make _forward return output dictionary? - actions = self.unnormalize_outputs({"action": actions})["action"] + actions = self._predict_action_chunk(batch)[:, :self.config.n_action_steps] # `self.model.forward` returns a (batch_size, n_action_steps, action_dim) tensor, but the queue # effectively has shape (n_action_steps, batch_size, *), hence the transpose. self._action_queue.extend(actions.transpose(0, 1)) return self._action_queue.popleft() - + @torch.no_grad - def predict_chunk(self, batch: dict[str, Tensor]) -> Tensor: + def _predict_action_chunk(self, batch: dict[str, Tensor]) -> Tensor: """Predict a chunk of actions given environment observations. 
This method returns the raw chunk of actions predicted by the model without diff --git a/lerobot/common/policies/smolvla/modeling_smolvla.py b/lerobot/common/policies/smolvla/modeling_smolvla.py index 5e0a9622e0..c037043367 100644 --- a/lerobot/common/policies/smolvla/modeling_smolvla.py +++ b/lerobot/common/policies/smolvla/modeling_smolvla.py @@ -382,6 +382,27 @@ def _load_as_safetensor( def get_optim_params(self) -> dict: return self.parameters() + + def _predict_action_chunk(self, batch: dict[str, Tensor], noise: Tensor | None = None) -> Tensor: + observation = self.normalize_inputs(batch) + + images, img_masks = self.prepare_images(observation) + state = self.prepare_state(observation) + lang_tokens, lang_masks = self.policy.prepare_language(observation) + + actions = self.policy.model.sample_actions( + images, img_masks, lang_tokens, lang_masks, state, noise=noise + ) + + # Unpad actions + original_action_dim = self.config.action_feature.shape[0] + actions = actions[:, :, :original_action_dim] + + actions = self.policy.unnormalize_outputs( + {"action": actions, "robot_type": [self.policy.config.robot_type]} + )["action"] + + return actions @torch.no_grad def select_action(self, batch: dict[str, Tensor], noise: Tensor | None = None) -> Tensor: @@ -402,21 +423,7 @@ def select_action(self, batch: dict[str, Tensor], noise: Tensor | None = None) - # Action queue logic for n_action_steps > 1. When the action_queue is depleted, populate it by # querying the policy. if len(self._queues[ACTION]) == 0: - for k in batch: - if k in self._queues: - batch[k] = torch.stack(list(self._queues[k]), dim=1) - images, img_masks = self.prepare_images(batch) - state = self.prepare_state(batch) - lang_tokens, lang_masks = self.prepare_language(batch) - - actions = self.model.sample_actions( - images, img_masks, lang_tokens, lang_masks, state, noise=noise - ) - # Unpad actions - original_action_dim = self.config.action_feature.shape[0] - actions = actions[:, :, :original_action_dim] - - actions = self.unnormalize_outputs({"action": actions})["action"] + actions = self._predict_action_chunk(batch, noise) if self.config.adapt_to_pi_aloha: actions = self._pi_aloha_encode_actions(actions) From ab064197a3b440598e23f557e7b7a21c2a2603c1 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Sat, 21 Jun 2025 09:04:10 +0000 Subject: [PATCH 67/88] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- lerobot/common/policies/act/modeling_act.py | 4 ++-- lerobot/common/policies/smolvla/modeling_smolvla.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/lerobot/common/policies/act/modeling_act.py b/lerobot/common/policies/act/modeling_act.py index 48de5b32d2..e7478cbb3b 100644 --- a/lerobot/common/policies/act/modeling_act.py +++ b/lerobot/common/policies/act/modeling_act.py @@ -132,13 +132,13 @@ def select_action(self, batch: dict[str, Tensor]) -> Tensor: # Action queue logic for n_action_steps > 1. When the action_queue is depleted, populate it by # querying the policy. if len(self._action_queue) == 0: - actions = self._predict_action_chunk(batch)[:, :self.config.n_action_steps] + actions = self._predict_action_chunk(batch)[:, : self.config.n_action_steps] # `self.model.forward` returns a (batch_size, n_action_steps, action_dim) tensor, but the queue # effectively has shape (n_action_steps, batch_size, *), hence the transpose. 
self._action_queue.extend(actions.transpose(0, 1)) return self._action_queue.popleft() - + @torch.no_grad def _predict_action_chunk(self, batch: dict[str, Tensor]) -> Tensor: """Predict a chunk of actions given environment observations. diff --git a/lerobot/common/policies/smolvla/modeling_smolvla.py b/lerobot/common/policies/smolvla/modeling_smolvla.py index c037043367..aad3b565ea 100644 --- a/lerobot/common/policies/smolvla/modeling_smolvla.py +++ b/lerobot/common/policies/smolvla/modeling_smolvla.py @@ -382,7 +382,7 @@ def _load_as_safetensor( def get_optim_params(self) -> dict: return self.parameters() - + def _predict_action_chunk(self, batch: dict[str, Tensor], noise: Tensor | None = None) -> Tensor: observation = self.normalize_inputs(batch) From 7fbf738c66b6a20cf84d40fb744681f4e6c4dca7 Mon Sep 17 00:00:00 2001 From: Francesco Capuano Date: Sat, 21 Jun 2025 11:21:41 +0200 Subject: [PATCH 68/88] add: smolvla in availables --- tests/test_available.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/test_available.py b/tests/test_available.py index a18b95ffab..c6477050dc 100644 --- a/tests/test_available.py +++ b/tests/test_available.py @@ -21,6 +21,7 @@ import lerobot from lerobot.common.policies.act.modeling_act import ACTPolicy from lerobot.common.policies.diffusion.modeling_diffusion import DiffusionPolicy +from lerobot.common.policies.smolvla.modeling_smolvla import SmolVLAPolicy from lerobot.common.policies.tdmpc.modeling_tdmpc import TDMPCPolicy from lerobot.common.policies.vqbet.modeling_vqbet import VQBeTPolicy from tests.utils import require_env @@ -45,7 +46,7 @@ def test_available_policies(): This test verifies that the class attribute `name` for all policies is consistent with those listed in `lerobot/__init__.py`. 
""" - policy_classes = [ACTPolicy, DiffusionPolicy, TDMPCPolicy, VQBeTPolicy] + policy_classes = [ACTPolicy, DiffusionPolicy, TDMPCPolicy, VQBeTPolicy, SmolVLAPolicy] policies = [pol_cls.name for pol_cls in policy_classes] assert set(policies) == set(lerobot.available_policies), policies From b3078864a09d1d6c3577d4af837907c686a23d6f Mon Sep 17 00:00:00 2001 From: Francesco Capuano Date: Thu, 26 Jun 2025 11:51:18 +0200 Subject: [PATCH 69/88] remove: smolvla from library supported policies --- lerobot/__init__.py | 2 +- tests/test_available.py | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/lerobot/__init__.py b/lerobot/__init__.py index 0dfdffb9be..11114da0ae 100644 --- a/lerobot/__init__.py +++ b/lerobot/__init__.py @@ -168,7 +168,7 @@ ) # lists all available policies from `lerobot/common/policies` -available_policies = ["act", "diffusion", "tdmpc", "vqbet", "smolvla"] +available_policies = ["act", "diffusion", "tdmpc", "vqbet"] # lists all available robots from `lerobot/common/robot_devices/robots` available_robots = [ diff --git a/tests/test_available.py b/tests/test_available.py index c6477050dc..a18b95ffab 100644 --- a/tests/test_available.py +++ b/tests/test_available.py @@ -21,7 +21,6 @@ import lerobot from lerobot.common.policies.act.modeling_act import ACTPolicy from lerobot.common.policies.diffusion.modeling_diffusion import DiffusionPolicy -from lerobot.common.policies.smolvla.modeling_smolvla import SmolVLAPolicy from lerobot.common.policies.tdmpc.modeling_tdmpc import TDMPCPolicy from lerobot.common.policies.vqbet.modeling_vqbet import VQBeTPolicy from tests.utils import require_env @@ -46,7 +45,7 @@ def test_available_policies(): This test verifies that the class attribute `name` for all policies is consistent with those listed in `lerobot/__init__.py`. 
""" - policy_classes = [ACTPolicy, DiffusionPolicy, TDMPCPolicy, VQBeTPolicy, SmolVLAPolicy] + policy_classes = [ACTPolicy, DiffusionPolicy, TDMPCPolicy, VQBeTPolicy] policies = [pol_cls.name for pol_cls in policy_classes] assert set(policies) == set(lerobot.available_policies), policies From 197b3f4d85efed1107f8199cc94eea86a525525f Mon Sep 17 00:00:00 2001 From: Francesco Capuano Date: Thu, 26 Jun 2025 14:28:19 +0200 Subject: [PATCH 70/88] fix: change env for training, xarm is broken as of now --- Makefile | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index c82483cc3b..8a2bdb010f 100644 --- a/Makefile +++ b/Makefile @@ -114,10 +114,9 @@ test-tdmpc-ete-train: python lerobot/scripts/train.py \ --policy.type=tdmpc \ --policy.device=$(DEVICE) \ - --env.type=xarm \ - --env.task=XarmLift-v0 \ + --env.type=pusht \ --env.episode_length=5 \ - --dataset.repo_id=lerobot/xarm_lift_medium \ + --dataset.repo_id=lerobot/pusht \ --dataset.image_transforms.enable=true \ --dataset.episodes="[0]" \ --batch_size=2 \ From 35b7a0848d48f64502154b7829d6f426b54a08d7 Mon Sep 17 00:00:00 2001 From: Francesco Capuano Date: Thu, 26 Jun 2025 14:31:28 +0200 Subject: [PATCH 71/88] add: predict_action_chunk to all supported policies --- lerobot/common/policies/act/modeling_act.py | 31 ++-------- .../policies/diffusion/modeling_diffusion.py | 36 +++++------ lerobot/common/policies/pi0/modeling_pi0.py | 6 ++ .../policies/pi0fast/modeling_pi0fast.py | 6 ++ lerobot/common/policies/sac/modeling_sac.py | 5 ++ .../policies/smolvla/modeling_smolvla.py | 26 ++++---- .../common/policies/tdmpc/modeling_tdmpc.py | 60 ++++++++++--------- .../common/policies/vqbet/modeling_vqbet.py | 32 +++++----- 8 files changed, 105 insertions(+), 97 deletions(-) diff --git a/lerobot/common/policies/act/modeling_act.py b/lerobot/common/policies/act/modeling_act.py index e7478cbb3b..9d475251b7 100644 --- a/lerobot/common/policies/act/modeling_act.py +++ b/lerobot/common/policies/act/modeling_act.py @@ -114,25 +114,12 @@ def select_action(self, batch: dict[str, Tensor]) -> Tensor: environment. It works by managing the actions in a queue and only calling `select_actions` when the queue is empty. """ - self.eval() - - batch = self.normalize_inputs(batch) - if self.config.image_features: - batch = dict(batch) # shallow copy so that adding a key doesn't modify the original - batch["observation.images"] = [batch[key] for key in self.config.image_features] - - # If we are doing temporal ensembling, do online updates where we keep track of the number of actions - # we are ensembling over. - if self.config.temporal_ensemble_coeff is not None: - actions = self.model(batch)[0] # (batch_size, chunk_size, action_dim) - actions = self.unnormalize_outputs({"action": actions})["action"] - action = self.temporal_ensembler.update(actions) - return action + self.eval() # keeping the policy in eval mode as it could be set to train mode while queue is consumed # Action queue logic for n_action_steps > 1. When the action_queue is depleted, populate it by # querying the policy. if len(self._action_queue) == 0: - actions = self._predict_action_chunk(batch)[:, : self.config.n_action_steps] + actions = self.predict_action_chunk(batch)[:, : self.config.n_action_steps] # `self.model.forward` returns a (batch_size, n_action_steps, action_dim) tensor, but the queue # effectively has shape (n_action_steps, batch_size, *), hence the transpose. 
@@ -140,18 +127,8 @@ def select_action(self, batch: dict[str, Tensor]) -> Tensor: return self._action_queue.popleft() @torch.no_grad - def _predict_action_chunk(self, batch: dict[str, Tensor]) -> Tensor: - """Predict a chunk of actions given environment observations. - - This method returns the raw chunk of actions predicted by the model without - any queue management or action consumption logic. - - Args: - batch: A dictionary of observation tensors. - - Returns: - A tensor of shape (batch_size, chunk_size, action_dim) containing predicted actions. - """ + def predict_action_chunk(self, batch: dict[str, Tensor]) -> Tensor: + """Predict a chunk of actions given environment observations.""" self.eval() batch = self.normalize_inputs(batch) diff --git a/lerobot/common/policies/diffusion/modeling_diffusion.py b/lerobot/common/policies/diffusion/modeling_diffusion.py index 446e2cb6ef..038136d07b 100644 --- a/lerobot/common/policies/diffusion/modeling_diffusion.py +++ b/lerobot/common/policies/diffusion/modeling_diffusion.py @@ -33,7 +33,7 @@ from diffusers.schedulers.scheduling_ddpm import DDPMScheduler from torch import Tensor, nn -from lerobot.common.constants import OBS_ENV_STATE, OBS_STATE +from lerobot.common.constants import ACTION, OBS_ENV_STATE, OBS_IMAGES, OBS_STATE from lerobot.common.policies.diffusion.configuration_diffusion import DiffusionConfig from lerobot.common.policies.normalize import Normalize, Unnormalize from lerobot.common.policies.pretrained import PreTrainedPolicy @@ -99,6 +99,18 @@ def reset(self): if self.config.env_state_feature: self._queues["observation.environment_state"] = deque(maxlen=self.config.n_obs_steps) + @torch.no_grad + def predict_action_chunk(self, batch: dict[str, Tensor]) -> Tensor: + """Predict a chunk of actions given environment observations.""" + # stack n latest observations from the queue + batch = {k: torch.stack(list(self._queues[k]), dim=1) for k in batch if k in self._queues} + actions = self.diffusion.generate_actions(batch) + + # TODO(rcadene): make above methods return output dictionary? + actions = self.unnormalize_outputs({ACTION: actions})[ACTION] + + return actions + @torch.no_grad def select_action(self, batch: dict[str, Tensor]) -> Tensor: """Select a single action given environment observations. @@ -124,23 +136,15 @@ def select_action(self, batch: dict[str, Tensor]) -> Tensor: batch = self.normalize_inputs(batch) if self.config.image_features: batch = dict(batch) # shallow copy so that adding a key doesn't modify the original - batch["observation.images"] = torch.stack( - [batch[key] for key in self.config.image_features], dim=-4 - ) + batch[OBS_IMAGES] = torch.stack([batch[key] for key in self.config.image_features], dim=-4) # Note: It's important that this happens after stacking the images into a single key. self._queues = populate_queues(self._queues, batch) - if len(self._queues["action"]) == 0: - # stack n latest observations from the queue - batch = {k: torch.stack(list(self._queues[k]), dim=1) for k in batch if k in self._queues} - actions = self.diffusion.generate_actions(batch) - - # TODO(rcadene): make above methods return output dictionary? 
- actions = self.unnormalize_outputs({"action": actions})["action"] + if len(self._queues[ACTION]) == 0: + actions = self.predict_action_chunk(batch) + self._queues[ACTION].extend(actions.transpose(0, 1)) - self._queues["action"].extend(actions.transpose(0, 1)) - - action = self._queues["action"].popleft() + action = self._queues[ACTION].popleft() return action def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, None]: @@ -148,9 +152,7 @@ def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, None]: batch = self.normalize_inputs(batch) if self.config.image_features: batch = dict(batch) # shallow copy so that adding a key doesn't modify the original - batch["observation.images"] = torch.stack( - [batch[key] for key in self.config.image_features], dim=-4 - ) + batch[OBS_IMAGES] = torch.stack([batch[key] for key in self.config.image_features], dim=-4) batch = self.normalize_targets(batch) loss = self.diffusion.compute_loss(batch) # no output_dict so returning None diff --git a/lerobot/common/policies/pi0/modeling_pi0.py b/lerobot/common/policies/pi0/modeling_pi0.py index 1d8a505592..e7cb7e1fb6 100644 --- a/lerobot/common/policies/pi0/modeling_pi0.py +++ b/lerobot/common/policies/pi0/modeling_pi0.py @@ -260,6 +260,12 @@ def reset(self): def get_optim_params(self) -> dict: return self.parameters() + @torch.no_grad + def predict_action_chunk(self, batch: dict[str, Tensor]) -> Tensor: + """Predict a chunk of actions given environment observations.""" + # NOTE(fracapuano): PI0 does not work, so I am excluding from https://github.com/huggingface/lerobot/pull/1020 + raise NotImplementedError("") + @torch.no_grad def select_action(self, batch: dict[str, Tensor], noise: Tensor | None = None) -> Tensor: """Select a single action given environment observations. diff --git a/lerobot/common/policies/pi0fast/modeling_pi0fast.py b/lerobot/common/policies/pi0fast/modeling_pi0fast.py index 7102bdded5..1c352466cd 100644 --- a/lerobot/common/policies/pi0fast/modeling_pi0fast.py +++ b/lerobot/common/policies/pi0fast/modeling_pi0fast.py @@ -192,6 +192,12 @@ def _pi_aloha_encode_actions_inv(self, actions): actions[:, :, motor_idx] = aloha_gripper_from_angular_inv(actions[:, :, motor_idx]) return actions + @torch.no_grad + def predict_action_chunk(self, batch: dict[str, Tensor]) -> Tensor: + """Predict a chunk of actions given environment observations.""" + # NOTE(fracapuan): PI0FAST does not work, so I am excluding from https://github.com/huggingface/lerobot/pull/1020 + raise NotImplementedError("") + @torch.no_grad def select_action(self, batch: dict[str, Tensor]) -> Tensor: """Select a single action given environment observations. diff --git a/lerobot/common/policies/sac/modeling_sac.py b/lerobot/common/policies/sac/modeling_sac.py index b588115ea0..1ca4693519 100644 --- a/lerobot/common/policies/sac/modeling_sac.py +++ b/lerobot/common/policies/sac/modeling_sac.py @@ -76,6 +76,11 @@ def reset(self): """Reset the policy""" pass + @torch.no_grad + def predict_action_chunk(self, batch: dict[str, Tensor]) -> Tensor: + """Predict a chunk of actions given environment observations.""" + raise NotImplementedError("SACPolicy does not support action chunking. 
It returns single actions!") + @torch.no_grad() def select_action(self, batch: dict[str, Tensor]) -> Tensor: """Select action for inference/evaluation""" diff --git a/lerobot/common/policies/smolvla/modeling_smolvla.py b/lerobot/common/policies/smolvla/modeling_smolvla.py index aad3b565ea..5bf09e5fcb 100644 --- a/lerobot/common/policies/smolvla/modeling_smolvla.py +++ b/lerobot/common/policies/smolvla/modeling_smolvla.py @@ -63,7 +63,7 @@ from torch import Tensor, nn from transformers import AutoProcessor -from lerobot.common.constants import ACTION, OBS_STATE +from lerobot.common.constants import ACTION, OBS_STATE, ROBOT_TYPE from lerobot.common.policies.normalize import ( Normalize, Unnormalize, @@ -384,6 +384,12 @@ def get_optim_params(self) -> dict: return self.parameters() def _predict_action_chunk(self, batch: dict[str, Tensor], noise: Tensor | None = None) -> Tensor: + self.eval() + + if self.config.adapt_to_pi_aloha: + batch[OBS_STATE] = self._pi_aloha_decode_state(batch[OBS_STATE]) + + batch = self.normalize_inputs(batch) observation = self.normalize_inputs(batch) images, img_masks = self.prepare_images(observation) @@ -399,8 +405,11 @@ def _predict_action_chunk(self, batch: dict[str, Tensor], noise: Tensor | None = actions = actions[:, :, :original_action_dim] actions = self.policy.unnormalize_outputs( - {"action": actions, "robot_type": [self.policy.config.robot_type]} - )["action"] + {ACTION: actions, ROBOT_TYPE: [self.policy.config.robot_type]} + )[ACTION] + + if self.config.adapt_to_pi_aloha: + actions = self._pi_aloha_encode_actions(actions) return actions @@ -414,23 +423,16 @@ def select_action(self, batch: dict[str, Tensor], noise: Tensor | None = None) - """ self.eval() - if self.config.adapt_to_pi_aloha: - batch[OBS_STATE] = self._pi_aloha_decode_state(batch[OBS_STATE]) - - batch = self.normalize_inputs(batch) - self._queues = populate_queues(self._queues, batch, exclude_keys=[ACTION]) # Action queue logic for n_action_steps > 1. When the action_queue is depleted, populate it by # querying the policy. if len(self._queues[ACTION]) == 0: actions = self._predict_action_chunk(batch, noise) - if self.config.adapt_to_pi_aloha: - actions = self._pi_aloha_encode_actions(actions) - - # `self.model.forward` returns a (batch_size, n_action_steps, action_dim) tensor, but the queue + # `self.predict_action_chunk` returns a (batch_size, n_action_steps, action_dim) tensor, but the queue # effectively has shape (n_action_steps, batch_size, *), hence the transpose. self._queues[ACTION].extend(actions.transpose(0, 1)[: self.config.n_action_steps]) + return self._queues[ACTION].popleft() def forward(self, batch: dict[str, Tensor], noise=None, time=None) -> dict[str, Tensor]: diff --git a/lerobot/common/policies/tdmpc/modeling_tdmpc.py b/lerobot/common/policies/tdmpc/modeling_tdmpc.py index 476e6decd2..58810d289e 100644 --- a/lerobot/common/policies/tdmpc/modeling_tdmpc.py +++ b/lerobot/common/policies/tdmpc/modeling_tdmpc.py @@ -35,7 +35,7 @@ import torch.nn.functional as F # noqa: N812 from torch import Tensor -from lerobot.common.constants import OBS_ENV_STATE, OBS_STATE +from lerobot.common.constants import OBS_ENV_STATE, OBS_IMAGE, OBS_STATE from lerobot.common.policies.normalize import Normalize, Unnormalize from lerobot.common.policies.pretrained import PreTrainedPolicy from lerobot.common.policies.tdmpc.configuration_tdmpc import TDMPCConfig @@ -110,43 +110,49 @@ def reset(self): # CEM for the next step. 
self._prev_mean: torch.Tensor | None = None + @torch.no_grad + def predict_action_chunk(self, batch: dict[str, Tensor]) -> Tensor: + """Predict a chunk of actions given environment observations.""" + batch = {key: torch.stack(list(self._queues[key]), dim=1) for key in batch if key in self._queues} + + # Remove the time dimensions as it is not handled yet. + for key in batch: + assert batch[key].shape[1] == 1 + batch[key] = batch[key][:, 0] + + # NOTE: Order of observations matters here. + encode_keys = [] + if self.config.image_features: + encode_keys.append("observation.image") + if self.config.env_state_feature: + encode_keys.append("observation.environment_state") + encode_keys.append("observation.state") + z = self.model.encode({k: batch[k] for k in encode_keys}) + if self.config.use_mpc: # noqa: SIM108 + actions = self.plan(z) # (horizon, batch, action_dim) + else: + # Plan with the policy (π) alone. This always returns one action so unsqueeze to get a + # sequence dimension like in the MPC branch. + actions = self.model.pi(z).unsqueeze(0) + + actions = torch.clamp(actions, -1, +1) + + actions = self.unnormalize_outputs({"action": actions})["action"] + return actions + @torch.no_grad() def select_action(self, batch: dict[str, Tensor]) -> Tensor: """Select a single action given environment observations.""" batch = self.normalize_inputs(batch) if self.config.image_features: batch = dict(batch) # shallow copy so that adding a key doesn't modify the original - batch["observation.image"] = batch[next(iter(self.config.image_features))] + batch[OBS_IMAGE] = batch[next(iter(self.config.image_features))] self._queues = populate_queues(self._queues, batch) # When the action queue is depleted, populate it again by querying the policy. if len(self._queues["action"]) == 0: - batch = {key: torch.stack(list(self._queues[key]), dim=1) for key in batch if key in self._queues} - - # Remove the time dimensions as it is not handled yet. - for key in batch: - assert batch[key].shape[1] == 1 - batch[key] = batch[key][:, 0] - - # NOTE: Order of observations matters here. - encode_keys = [] - if self.config.image_features: - encode_keys.append("observation.image") - if self.config.env_state_feature: - encode_keys.append("observation.environment_state") - encode_keys.append("observation.state") - z = self.model.encode({k: batch[k] for k in encode_keys}) - if self.config.use_mpc: # noqa: SIM108 - actions = self.plan(z) # (horizon, batch, action_dim) - else: - # Plan with the policy (π) alone. This always returns one action so unsqueeze to get a - # sequence dimension like in the MPC branch. 
- actions = self.model.pi(z).unsqueeze(0) - - actions = torch.clamp(actions, -1, +1) - - actions = self.unnormalize_outputs({"action": actions})["action"] + actions = self.predict_action_chunk(batch) if self.config.n_action_repeats > 1: for _ in range(self.config.n_action_repeats): diff --git a/lerobot/common/policies/vqbet/modeling_vqbet.py b/lerobot/common/policies/vqbet/modeling_vqbet.py index 44006a5b21..a76bea2ab5 100644 --- a/lerobot/common/policies/vqbet/modeling_vqbet.py +++ b/lerobot/common/policies/vqbet/modeling_vqbet.py @@ -27,6 +27,7 @@ import torchvision from torch import Tensor, nn +from lerobot.common.constants import ACTION, OBS_IMAGES, OBS_STATE from lerobot.common.policies.normalize import Normalize, Unnormalize from lerobot.common.policies.pretrained import PreTrainedPolicy from lerobot.common.policies.utils import get_device_from_parameters, get_output_shape, populate_queues @@ -118,11 +119,18 @@ def reset(self): queues are populated during rollout of the policy, they contain the n latest observations and actions """ self._queues = { - "observation.images": deque(maxlen=self.config.n_obs_steps), - "observation.state": deque(maxlen=self.config.n_obs_steps), - "action": deque(maxlen=self.config.action_chunk_size), + OBS_IMAGES: deque(maxlen=self.config.n_obs_steps), + OBS_STATE: deque(maxlen=self.config.n_obs_steps), + ACTION: deque(maxlen=self.config.action_chunk_size), } + @torch.no_grad + def predict_action_chunk(self, batch: dict[str, Tensor]) -> Tensor: + batch = {k: torch.stack(list(self._queues[k]), dim=1) for k in batch if k in self._queues} + actions = self.vqbet(batch, rollout=True)[:, : self.config.action_chunk_size] + actions = self.unnormalize_outputs({ACTION: actions})[ACTION] + return actions + @torch.no_grad def select_action(self, batch: dict[str, Tensor]) -> Tensor: """Select a single action given environment observations. 
@@ -144,23 +152,19 @@ def select_action(self, batch: dict[str, Tensor]) -> Tensor: stacklevel=1, ) - if len(self._queues["action"]) == 0: - batch = {k: torch.stack(list(self._queues[k]), dim=1) for k in batch if k in self._queues} - actions = self.vqbet(batch, rollout=True)[:, : self.config.action_chunk_size] - - # the dimension of returned action is (batch_size, action_chunk_size, action_dim) - actions = self.unnormalize_outputs({"action": actions})["action"] + if len(self._queues[ACTION]) == 0: + actions = self.predict_action_chunk(batch) # since the data in the action queue's dimension is (action_chunk_size, batch_size, action_dim), we transpose the action and fill the queue - self._queues["action"].extend(actions.transpose(0, 1)) + self._queues[ACTION].extend(actions.transpose(0, 1)) - action = self._queues["action"].popleft() + action = self._queues[ACTION].popleft() return action def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, dict]: """Run the batch through the model and compute the loss for training or validation.""" batch = self.normalize_inputs(batch) batch = dict(batch) # shallow copy so that adding a key doesn't modify the original - batch["observation.images"] = torch.stack([batch[key] for key in self.config.image_features], dim=-4) + batch[OBS_IMAGES] = torch.stack([batch[key] for key in self.config.image_features], dim=-4) batch = self.normalize_targets(batch) # VQ-BeT discretizes action using VQ-VAE before training BeT (please refer to section 3.2 in the VQ-BeT paper https://huggingface.co/papers/2403.03181) if not self.vqbet.action_head.vqvae_model.discretized.item(): @@ -168,7 +172,7 @@ def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, dict]: # n_different_codes: how many of the total possible VQ codes are being used in single batch (how many of them have at least one encoder embedding as a nearest neighbor). This can be at most `vqvae_n_embed * number of layers of RVQ (=2)`. # n_different_combinations: how many different code combinations are being used out of all possible combinations in single batch. This can be at most `vqvae_n_embed ^ number of layers of RVQ (=2)` (hint consider the RVQ as a decision tree). 
loss, n_different_codes, n_different_combinations, recon_l1_error = ( - self.vqbet.action_head.discretize(self.config.n_vqvae_training_steps, batch["action"]) + self.vqbet.action_head.discretize(self.config.n_vqvae_training_steps, batch[ACTION]) ) return loss, { "n_different_codes": n_different_codes, @@ -404,7 +408,7 @@ def forward(self, batch: dict[str, Tensor], rollout: bool) -> tuple[dict, dict]: ) # else, it calculate overall loss (bin prediction loss, and offset loss) else: - output = batch["action"][:, self.select_target_actions_indices] + output = batch[ACTION][:, self.select_target_actions_indices] loss = self.action_head.loss_fn(action_head_output, output, reduction="mean") return action_head_output, loss From 6a8f2bff300403f93e2ee7ada656a27af00ac960 Mon Sep 17 00:00:00 2001 From: Francesco Capuano Date: Thu, 26 Jun 2025 14:33:14 +0200 Subject: [PATCH 72/88] fix: add robot type constants --- lerobot/common/constants.py | 1 + 1 file changed, 1 insertion(+) diff --git a/lerobot/common/constants.py b/lerobot/common/constants.py index 990f2aa1eb..30777239ef 100644 --- a/lerobot/common/constants.py +++ b/lerobot/common/constants.py @@ -25,6 +25,7 @@ REWARD = "next.reward" ROBOTS = "robots" +ROBOT_TYPE = "robot_type" TELEOPERATORS = "teleoperators" # files & directories From ffc3d8f34c49d49aff121e44aaa8e75d4e8f9d76 Mon Sep 17 00:00:00 2001 From: Francesco Capuano Date: Thu, 26 Jun 2025 14:33:38 +0200 Subject: [PATCH 73/88] add: predict action chunk in base policy class --- lerobot/common/policies/pretrained.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/lerobot/common/policies/pretrained.py b/lerobot/common/policies/pretrained.py index da4ef15721..fc1c61491a 100644 --- a/lerobot/common/policies/pretrained.py +++ b/lerobot/common/policies/pretrained.py @@ -189,6 +189,15 @@ def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, dict | None]: """ raise NotImplementedError + @abc.abstractmethod + def predict_action_chunk(self, batch: dict[str, Tensor]) -> Tensor: + """Returns the action chunk (for action chunking policies) for a given observation, potentially in batch mode. + + Child classes using action chunking should use this method within `select_action` to form the action chunk + cached for selection. + """ + raise NotImplementedError + @abc.abstractmethod def select_action(self, batch: dict[str, Tensor]) -> Tensor: """Return one action to run in the environment (potentially in batch mode). 
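A minimal, hypothetical sketch of how the chunk-level API introduced in PATCH 71-73 might be exercised by a caller (not part of the patches; the checkpoint id and observation keys are placeholders and depend on the policy's configured input features):

```python
# Hypothetical usage of the chunk-level API from PATCH 71-73. The checkpoint id
# and observation keys are placeholders for whatever the policy was trained with.
import torch

from lerobot.common.policies.act.modeling_act import ACTPolicy

policy = ACTPolicy.from_pretrained("lerobot/act_aloha_sim_transfer_cube_human")
policy.reset()

batch = {
    "observation.state": torch.zeros(1, 14),                # (batch, state_dim)
    "observation.images.top": torch.zeros(1, 3, 480, 640),  # (batch, C, H, W)
}

# Full predicted chunk, shaped (batch_size, chunk_size, action_dim).
chunk = policy.predict_action_chunk(batch)

# select_action keeps its existing contract: it pops one (batch_size, action_dim)
# action per call from the internal queue.
action = policy.select_action(batch)
```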
From a4074f38f736791f003132543d49486569089669 Mon Sep 17 00:00:00 2001 From: Francesco Capuano Date: Thu, 26 Jun 2025 14:54:25 +0200 Subject: [PATCH 74/88] restore original Makefile --- Makefile | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 8a2bdb010f..0bbfec5e49 100644 --- a/Makefile +++ b/Makefile @@ -114,9 +114,11 @@ test-tdmpc-ete-train: python lerobot/scripts/train.py \ --policy.type=tdmpc \ --policy.device=$(DEVICE) \ - --env.type=pusht \ + --policy.push_to_hub=false \ + --env.type=xarm \ + --env.task=XarmLift-v0 \ --env.episode_length=5 \ - --dataset.repo_id=lerobot/pusht \ + --dataset.repo_id=lerobot/xarm_lift_medium \ --dataset.image_transforms.enable=true \ --dataset.episodes="[0]" \ --batch_size=2 \ From 21734e544dd46466084f4686fec3b67e0437c7c2 Mon Sep 17 00:00:00 2001 From: Francesco Capuano Date: Thu, 26 Jun 2025 14:56:32 +0200 Subject: [PATCH 75/88] fix: minor --- Makefile | 1 - 1 file changed, 1 deletion(-) diff --git a/Makefile b/Makefile index 0bbfec5e49..c82483cc3b 100644 --- a/Makefile +++ b/Makefile @@ -114,7 +114,6 @@ test-tdmpc-ete-train: python lerobot/scripts/train.py \ --policy.type=tdmpc \ --policy.device=$(DEVICE) \ - --policy.push_to_hub=false \ --env.type=xarm \ --env.task=XarmLift-v0 \ --env.episode_length=5 \ From 8df180920366f441e29868eb2804ceec974a2050 Mon Sep 17 00:00:00 2001 From: Francesco Capuano Date: Thu, 26 Jun 2025 14:57:08 +0200 Subject: [PATCH 76/88] fix: dict keys come from lerobot/constants --- .../common/policies/tdmpc/modeling_tdmpc.py | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/lerobot/common/policies/tdmpc/modeling_tdmpc.py b/lerobot/common/policies/tdmpc/modeling_tdmpc.py index 58810d289e..4ee84404d9 100644 --- a/lerobot/common/policies/tdmpc/modeling_tdmpc.py +++ b/lerobot/common/policies/tdmpc/modeling_tdmpc.py @@ -35,7 +35,7 @@ import torch.nn.functional as F # noqa: N812 from torch import Tensor -from lerobot.common.constants import OBS_ENV_STATE, OBS_IMAGE, OBS_STATE +from lerobot.common.constants import ACTION, OBS_ENV_STATE, OBS_IMAGE, OBS_STATE, REWARD from lerobot.common.policies.normalize import Normalize, Unnormalize from lerobot.common.policies.pretrained import PreTrainedPolicy from lerobot.common.policies.tdmpc.configuration_tdmpc import TDMPCConfig @@ -123,10 +123,10 @@ def predict_action_chunk(self, batch: dict[str, Tensor]) -> Tensor: # NOTE: Order of observations matters here. 
encode_keys = [] if self.config.image_features: - encode_keys.append("observation.image") + encode_keys.append(OBS_IMAGE) if self.config.env_state_feature: - encode_keys.append("observation.environment_state") - encode_keys.append("observation.state") + encode_keys.append(OBS_ENV_STATE) + encode_keys.append(OBS_STATE) z = self.model.encode({k: batch[k] for k in encode_keys}) if self.config.use_mpc: # noqa: SIM108 actions = self.plan(z) # (horizon, batch, action_dim) @@ -137,7 +137,7 @@ def predict_action_chunk(self, batch: dict[str, Tensor]) -> Tensor: actions = torch.clamp(actions, -1, +1) - actions = self.unnormalize_outputs({"action": actions})["action"] + actions = self.unnormalize_outputs({ACTION: actions})[ACTION] return actions @torch.no_grad() @@ -156,12 +156,12 @@ def select_action(self, batch: dict[str, Tensor]) -> Tensor: if self.config.n_action_repeats > 1: for _ in range(self.config.n_action_repeats): - self._queues["action"].append(actions[0]) + self._queues[ACTION].append(actions[0]) else: # Action queue is (n_action_steps, batch_size, action_dim), so we transpose the action. - self._queues["action"].extend(actions[: self.config.n_action_steps]) + self._queues[ACTION].extend(actions[: self.config.n_action_steps]) - action = self._queues["action"].popleft() + action = self._queues[ACTION].popleft() return action @torch.no_grad() @@ -318,7 +318,7 @@ def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, dict]: batch = self.normalize_inputs(batch) if self.config.image_features: batch = dict(batch) # shallow copy so that adding a key doesn't modify the original - batch["observation.image"] = batch[next(iter(self.config.image_features))] + batch[OBS_IMAGE] = batch[next(iter(self.config.image_features))] batch = self.normalize_targets(batch) info = {} @@ -328,15 +328,15 @@ def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, dict]: if isinstance(batch[key], torch.Tensor) and batch[key].ndim > 1: batch[key] = batch[key].transpose(1, 0) - action = batch["action"] # (t, b, action_dim) - reward = batch["next.reward"] # (t, b) + action = batch[ACTION] # (t, b, action_dim) + reward = batch[REWARD] # (t, b) observations = {k: v for k, v in batch.items() if k.startswith("observation.")} # Apply random image augmentations. if self.config.image_features and self.config.max_random_shift_ratio > 0: - observations["observation.image"] = flatten_forward_unflatten( + observations[OBS_IMAGE] = flatten_forward_unflatten( partial(random_shifts_aug, max_random_shift_ratio=self.config.max_random_shift_ratio), - observations["observation.image"], + observations[OBS_IMAGE], ) # Get the current observation for predicting trajectories, and all future observations for use in @@ -346,7 +346,7 @@ def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, dict]: current_observation[k] = observations[k][0] next_observations[k] = observations[k][1:] horizon, batch_size = next_observations[ - "observation.image" if self.config.image_features else "observation.environment_state" + OBS_IMAGE if self.config.image_features else OBS_ENV_STATE ].shape[:2] # Run latent rollout using the latent dynamics model and policy model. 
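PATCH 72 and PATCH 76 standardize dictionary keys on the names defined in `lerobot/common/constants.py` instead of hard-coded strings. A short illustrative sketch of the convention, with dummy tensors as placeholders:

```python
# The constants resolve to the same string keys the old code spelled out by hand,
# e.g. ACTION == "action", OBS_IMAGE == "observation.image", REWARD == "next.reward".
# The tensors below are dummy placeholders.
import torch

from lerobot.common.constants import ACTION, OBS_IMAGE, OBS_STATE, REWARD

batch = {
    OBS_IMAGE: torch.zeros(1, 3, 84, 84),
    OBS_STATE: torch.zeros(1, 4),
    ACTION: torch.zeros(1, 4),
    REWARD: torch.zeros(1),
}
```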
From 342e3f2714a996d7c857cda75ab442315b845fe8 Mon Sep 17 00:00:00 2001 From: Francesco Capuano Date: Thu, 26 Jun 2025 15:33:12 +0200 Subject: [PATCH 77/88] fix: improve act encapsulation, properly supporting temporal ensembling --- lerobot/common/policies/act/modeling_act.py | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/lerobot/common/policies/act/modeling_act.py b/lerobot/common/policies/act/modeling_act.py index 9d475251b7..19846dadf1 100644 --- a/lerobot/common/policies/act/modeling_act.py +++ b/lerobot/common/policies/act/modeling_act.py @@ -33,6 +33,7 @@ from torchvision.models._utils import IntermediateLayerGetter from torchvision.ops.misc import FrozenBatchNorm2d +from lerobot.common.constants import ACTION, OBS_IMAGES from lerobot.common.policies.act.configuration_act import ACTConfig from lerobot.common.policies.normalize import Normalize, Unnormalize from lerobot.common.policies.pretrained import PreTrainedPolicy @@ -116,6 +117,11 @@ def select_action(self, batch: dict[str, Tensor]) -> Tensor: """ self.eval() # keeping the policy in eval mode as it could be set to train mode while queue is consumed + if self.config.temporal_ensemble_coeff is not None: + actions = self.predict_action_chunk(batch)[:, : self.config.n_action_steps] + action = self.temporal_ensembler.update(actions) + return action + # Action queue logic for n_action_steps > 1. When the action_queue is depleted, populate it by # querying the policy. if len(self._action_queue) == 0: @@ -134,17 +140,10 @@ def predict_action_chunk(self, batch: dict[str, Tensor]) -> Tensor: batch = self.normalize_inputs(batch) if self.config.image_features: batch = dict(batch) # shallow copy so that adding a key doesn't modify the original - batch["observation.images"] = [batch[key] for key in self.config.image_features] - - # If we are using temporal ensembling - if self.config.temporal_ensemble_coeff is not None: - actions = self.model(batch)[0] # (batch_size, chunk_size, action_dim) - actions = self.unnormalize_outputs({"action": actions})["action"] - return actions + batch[OBS_IMAGES] = [batch[key] for key in self.config.image_features] - # Standard action prediction actions = self.model(batch)[0] - actions = self.unnormalize_outputs({"action": actions})["action"] + actions = self.unnormalize_outputs({ACTION: actions})[ACTION] return actions def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, dict]: @@ -152,13 +151,13 @@ def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, dict]: batch = self.normalize_inputs(batch) if self.config.image_features: batch = dict(batch) # shallow copy so that adding a key doesn't modify the original - batch["observation.images"] = [batch[key] for key in self.config.image_features] + batch[OBS_IMAGES] = [batch[key] for key in self.config.image_features] batch = self.normalize_targets(batch) actions_hat, (mu_hat, log_sigma_x2_hat) = self.model(batch) l1_loss = ( - F.l1_loss(batch["action"], actions_hat, reduction="none") * ~batch["action_is_pad"].unsqueeze(-1) + F.l1_loss(batch[ACTION], actions_hat, reduction="none") * ~batch["action_is_pad"].unsqueeze(-1) ).mean() loss_dict = {"l1_loss": l1_loss.item()} From d0187a379a2fcaeaec6d352c0146604c4c28349d Mon Sep 17 00:00:00 2001 From: Francesco Capuano Date: Thu, 26 Jun 2025 15:52:37 +0200 Subject: [PATCH 78/88] fix: smolvla action chunking --- .../policies/smolvla/modeling_smolvla.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git 
a/lerobot/common/policies/smolvla/modeling_smolvla.py b/lerobot/common/policies/smolvla/modeling_smolvla.py index 5bf09e5fcb..31d642c175 100644 --- a/lerobot/common/policies/smolvla/modeling_smolvla.py +++ b/lerobot/common/policies/smolvla/modeling_smolvla.py @@ -390,15 +390,15 @@ def _predict_action_chunk(self, batch: dict[str, Tensor], noise: Tensor | None = batch[OBS_STATE] = self._pi_aloha_decode_state(batch[OBS_STATE]) batch = self.normalize_inputs(batch) - observation = self.normalize_inputs(batch) + for k in batch: + if k in self._queues: + batch[k] = torch.stack(list(self._queues[k]), dim=1) - images, img_masks = self.prepare_images(observation) - state = self.prepare_state(observation) - lang_tokens, lang_masks = self.policy.prepare_language(observation) + images, img_masks = self.prepare_images(batch) + state = self.prepare_state(batch) + lang_tokens, lang_masks = self.prepare_language(batch) - actions = self.policy.model.sample_actions( - images, img_masks, lang_tokens, lang_masks, state, noise=noise - ) + actions = self.model.sample_actions(images, img_masks, lang_tokens, lang_masks, state, noise=noise) # Unpad actions original_action_dim = self.config.action_feature.shape[0] @@ -423,6 +423,10 @@ def select_action(self, batch: dict[str, Tensor], noise: Tensor | None = None) - """ self.eval() + if self.config.adapt_to_pi_aloha: + batch[OBS_STATE] = self._pi_aloha_decode_state(batch[OBS_STATE]) + + batch = self.normalize_inputs(batch) self._queues = populate_queues(self._queues, batch, exclude_keys=[ACTION]) # Action queue logic for n_action_steps > 1. When the action_queue is depleted, populate it by # querying the policy. From aa01e8ce23e94c82396a32644ba1a331e463099b Mon Sep 17 00:00:00 2001 From: Francesco Capuano Date: Thu, 26 Jun 2025 15:57:48 +0200 Subject: [PATCH 79/88] fix: very minor, but very annoying --- lerobot/common/policies/smolvla/modeling_smolvla.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lerobot/common/policies/smolvla/modeling_smolvla.py b/lerobot/common/policies/smolvla/modeling_smolvla.py index 31d642c175..cfb597a87e 100644 --- a/lerobot/common/policies/smolvla/modeling_smolvla.py +++ b/lerobot/common/policies/smolvla/modeling_smolvla.py @@ -383,7 +383,7 @@ def _load_as_safetensor( def get_optim_params(self) -> dict: return self.parameters() - def _predict_action_chunk(self, batch: dict[str, Tensor], noise: Tensor | None = None) -> Tensor: + def predict_action_chunk(self, batch: dict[str, Tensor], noise: Tensor | None = None) -> Tensor: self.eval() if self.config.adapt_to_pi_aloha: From b70573e96ff47678882548c8296f0f37c1cca1e5 Mon Sep 17 00:00:00 2001 From: Francesco Capuano Date: Thu, 26 Jun 2025 15:58:08 +0200 Subject: [PATCH 80/88] fix: minor --- lerobot/common/policies/smolvla/modeling_smolvla.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lerobot/common/policies/smolvla/modeling_smolvla.py b/lerobot/common/policies/smolvla/modeling_smolvla.py index cfb597a87e..f7dbd1be1d 100644 --- a/lerobot/common/policies/smolvla/modeling_smolvla.py +++ b/lerobot/common/policies/smolvla/modeling_smolvla.py @@ -431,7 +431,7 @@ def select_action(self, batch: dict[str, Tensor], noise: Tensor | None = None) - # Action queue logic for n_action_steps > 1. When the action_queue is depleted, populate it by # querying the policy. 
if len(self._queues[ACTION]) == 0: - actions = self._predict_action_chunk(batch, noise) + actions = self.predict_action_chunk(batch, noise) # `self.predict_action_chunk` returns a (batch_size, n_action_steps, action_dim) tensor, but the queue # effectively has shape (n_action_steps, batch_size, *), hence the transpose. From bdb1f5cbb04b48687caa0c4ebdd02e3de3781dc0 Mon Sep 17 00:00:00 2001 From: Francesco Capuano <74058581+fracapuano@users.noreply.github.com> Date: Thu, 26 Jun 2025 16:00:04 +0200 Subject: [PATCH 81/88] fix minor naming Co-authored-by: Steven Palma Signed-off-by: Francesco Capuano <74058581+fracapuano@users.noreply.github.com> --- lerobot/common/policies/pi0/modeling_pi0.py | 3 +-- lerobot/common/policies/pi0fast/modeling_pi0fast.py | 3 +-- lerobot/common/policies/tdmpc/modeling_tdmpc.py | 2 +- 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/lerobot/common/policies/pi0/modeling_pi0.py b/lerobot/common/policies/pi0/modeling_pi0.py index e7cb7e1fb6..97e66a272d 100644 --- a/lerobot/common/policies/pi0/modeling_pi0.py +++ b/lerobot/common/policies/pi0/modeling_pi0.py @@ -263,8 +263,7 @@ def get_optim_params(self) -> dict: @torch.no_grad def predict_action_chunk(self, batch: dict[str, Tensor]) -> Tensor: """Predict a chunk of actions given environment observations.""" - # NOTE(fracapuano): PI0 does not work, so I am excluding from https://github.com/huggingface/lerobot/pull/1020 - raise NotImplementedError("") + raise NotImplementedError("Currently not implemented for PI0") @torch.no_grad def select_action(self, batch: dict[str, Tensor], noise: Tensor | None = None) -> Tensor: diff --git a/lerobot/common/policies/pi0fast/modeling_pi0fast.py b/lerobot/common/policies/pi0fast/modeling_pi0fast.py index 1c352466cd..dbf5266b16 100644 --- a/lerobot/common/policies/pi0fast/modeling_pi0fast.py +++ b/lerobot/common/policies/pi0fast/modeling_pi0fast.py @@ -195,8 +195,7 @@ def _pi_aloha_encode_actions_inv(self, actions): @torch.no_grad def predict_action_chunk(self, batch: dict[str, Tensor]) -> Tensor: """Predict a chunk of actions given environment observations.""" - # NOTE(fracapuan): PI0FAST does not work, so I am excluding from https://github.com/huggingface/lerobot/pull/1020 - raise NotImplementedError("") + raise NotImplementedError("Currently not implemented for PI0FAST") @torch.no_grad def select_action(self, batch: dict[str, Tensor]) -> Tensor: diff --git a/lerobot/common/policies/tdmpc/modeling_tdmpc.py b/lerobot/common/policies/tdmpc/modeling_tdmpc.py index 4ee84404d9..4bb564f8f2 100644 --- a/lerobot/common/policies/tdmpc/modeling_tdmpc.py +++ b/lerobot/common/policies/tdmpc/modeling_tdmpc.py @@ -151,7 +151,7 @@ def select_action(self, batch: dict[str, Tensor]) -> Tensor: self._queues = populate_queues(self._queues, batch) # When the action queue is depleted, populate it again by querying the policy. 
- if len(self._queues["action"]) == 0: + if len(self._queues[ACTION]) == 0: actions = self.predict_action_chunk(batch) if self.config.n_action_repeats > 1: From cdeaf19b6bbaf1feda2c9b7c1037c06b52897aaa Mon Sep 17 00:00:00 2001 From: Francesco Capuano Date: Thu, 26 Jun 2025 16:16:56 +0200 Subject: [PATCH 82/88] fix: refactoring inference for single actions and chunks into different components --- .../policies/smolvla/modeling_smolvla.py | 32 +++++++++++-------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/lerobot/common/policies/smolvla/modeling_smolvla.py b/lerobot/common/policies/smolvla/modeling_smolvla.py index f7dbd1be1d..01550903c3 100644 --- a/lerobot/common/policies/smolvla/modeling_smolvla.py +++ b/lerobot/common/policies/smolvla/modeling_smolvla.py @@ -383,13 +383,7 @@ def _load_as_safetensor( def get_optim_params(self) -> dict: return self.parameters() - def predict_action_chunk(self, batch: dict[str, Tensor], noise: Tensor | None = None) -> Tensor: - self.eval() - - if self.config.adapt_to_pi_aloha: - batch[OBS_STATE] = self._pi_aloha_decode_state(batch[OBS_STATE]) - - batch = self.normalize_inputs(batch) + def _get_action_chunk(self, batch: dict[str, Tensor], noise: Tensor | None = None) -> Tensor: for k in batch: if k in self._queues: batch[k] = torch.stack(list(self._queues[k]), dim=1) @@ -413,6 +407,22 @@ def predict_action_chunk(self, batch: dict[str, Tensor], noise: Tensor | None = return actions + def _prepare_batch(self, batch: dict[str, Tensor]) -> dict[str, Tensor]: + if self.config.adapt_to_pi_aloha: + batch[OBS_STATE] = self._pi_aloha_decode_state(batch[OBS_STATE]) + + batch = self.normalize_inputs(batch) + self._queues = populate_queues(self._queues, batch, exclude_keys=[ACTION]) + + return batch + + def predict_action_chunk(self, batch: dict[str, Tensor], noise: Tensor | None = None) -> Tensor: + self.eval() + + batch = self._prepare_batch(batch) + actions = self._get_action_chunk(batch, noise) + return actions + @torch.no_grad def select_action(self, batch: dict[str, Tensor], noise: Tensor | None = None) -> Tensor: """Select a single action given environment observations. @@ -422,16 +432,12 @@ def select_action(self, batch: dict[str, Tensor], noise: Tensor | None = None) - queue is empty. """ self.eval() + batch = self.prepare_batch(batch) - if self.config.adapt_to_pi_aloha: - batch[OBS_STATE] = self._pi_aloha_decode_state(batch[OBS_STATE]) - - batch = self.normalize_inputs(batch) - self._queues = populate_queues(self._queues, batch, exclude_keys=[ACTION]) # Action queue logic for n_action_steps > 1. When the action_queue is depleted, populate it by # querying the policy. if len(self._queues[ACTION]) == 0: - actions = self.predict_action_chunk(batch, noise) + actions = self._get_action_chunk(batch, noise) # `self.predict_action_chunk` returns a (batch_size, n_action_steps, action_dim) tensor, but the queue # effectively has shape (n_action_steps, batch_size, *), hence the transpose. 
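The transpose-and-extend comment repeated across these diffs can be made concrete with a small standalone sketch (sizes are arbitrary; only the shapes matter):

```python
# Standalone illustration of the action-queue bookkeeping referenced in the
# comments above; the sizes are arbitrary placeholders.
from collections import deque

import torch

batch_size, n_action_steps, action_dim = 2, 5, 7
chunk = torch.zeros(batch_size, n_action_steps, action_dim)  # shape returned by predict_action_chunk

queue = deque(maxlen=n_action_steps)
# transpose(0, 1) -> (n_action_steps, batch_size, action_dim), so extend() enqueues
# n_action_steps entries, each of shape (batch_size, action_dim)
queue.extend(chunk.transpose(0, 1))

action = queue.popleft()  # one action per environment step
assert action.shape == (batch_size, action_dim)
```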
From cba1e623e472b1c6606b26d28104fc6a7467be57 Mon Sep 17 00:00:00 2001 From: Francesco Capuano Date: Thu, 26 Jun 2025 16:17:43 +0200 Subject: [PATCH 83/88] fix: minor --- lerobot/common/policies/smolvla/modeling_smolvla.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lerobot/common/policies/smolvla/modeling_smolvla.py b/lerobot/common/policies/smolvla/modeling_smolvla.py index 01550903c3..6ff8c09aab 100644 --- a/lerobot/common/policies/smolvla/modeling_smolvla.py +++ b/lerobot/common/policies/smolvla/modeling_smolvla.py @@ -432,7 +432,7 @@ def select_action(self, batch: dict[str, Tensor], noise: Tensor | None = None) - queue is empty. """ self.eval() - batch = self.prepare_batch(batch) + batch = self._prepare_batch(batch) # Action queue logic for n_action_steps > 1. When the action_queue is depleted, populate it by # querying the policy. From d4277d10351d38713fffa1ecea00d7b24a32682b Mon Sep 17 00:00:00 2001 From: Francesco Capuano Date: Thu, 26 Jun 2025 16:19:07 +0200 Subject: [PATCH 84/88] fix: temporal ensembling --- lerobot/common/policies/act/modeling_act.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lerobot/common/policies/act/modeling_act.py b/lerobot/common/policies/act/modeling_act.py index 19846dadf1..1220665777 100644 --- a/lerobot/common/policies/act/modeling_act.py +++ b/lerobot/common/policies/act/modeling_act.py @@ -118,7 +118,7 @@ def select_action(self, batch: dict[str, Tensor]) -> Tensor: self.eval() # keeping the policy in eval mode as it could be set to train mode while queue is consumed if self.config.temporal_ensemble_coeff is not None: - actions = self.predict_action_chunk(batch)[:, : self.config.n_action_steps] + actions = self.predict_action_chunk(batch) action = self.temporal_ensembler.update(actions) return action From e653a583578f2af13138698cd3f672efc14de3a1 Mon Sep 17 00:00:00 2001 From: Francesco Capuano Date: Thu, 26 Jun 2025 16:56:55 +0200 Subject: [PATCH 85/88] fix: moving populate queues out of modular component for batch preparation --- lerobot/common/policies/smolvla/modeling_smolvla.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lerobot/common/policies/smolvla/modeling_smolvla.py b/lerobot/common/policies/smolvla/modeling_smolvla.py index 6ff8c09aab..9ada403c19 100644 --- a/lerobot/common/policies/smolvla/modeling_smolvla.py +++ b/lerobot/common/policies/smolvla/modeling_smolvla.py @@ -412,7 +412,6 @@ def _prepare_batch(self, batch: dict[str, Tensor]) -> dict[str, Tensor]: batch[OBS_STATE] = self._pi_aloha_decode_state(batch[OBS_STATE]) batch = self.normalize_inputs(batch) - self._queues = populate_queues(self._queues, batch, exclude_keys=[ACTION]) return batch @@ -420,6 +419,8 @@ def predict_action_chunk(self, batch: dict[str, Tensor], noise: Tensor | None = self.eval() batch = self._prepare_batch(batch) + self._queues = populate_queues(self._queues, batch, exclude_keys=[ACTION]) + actions = self._get_action_chunk(batch, noise) return actions @@ -433,6 +434,7 @@ def select_action(self, batch: dict[str, Tensor], noise: Tensor | None = None) - """ self.eval() batch = self._prepare_batch(batch) + self._queues = populate_queues(self._queues, batch, exclude_keys=[ACTION]) # Action queue logic for n_action_steps > 1. When the action_queue is depleted, populate it by # querying the policy. 
From a7ba5abce4a0d2553de5d283ba7f622c606933c0 Mon Sep 17 00:00:00 2001
From: Francesco Capuano
Date: Thu, 26 Jun 2025 18:39:23 +0200
Subject: [PATCH 86/88] fix: minor for CI

---
 lerobot/common/policies/smolvla/modeling_smolvla.py | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/lerobot/common/policies/smolvla/modeling_smolvla.py b/lerobot/common/policies/smolvla/modeling_smolvla.py
index 9ada403c19..e0fe9e78ba 100644
--- a/lerobot/common/policies/smolvla/modeling_smolvla.py
+++ b/lerobot/common/policies/smolvla/modeling_smolvla.py
@@ -398,9 +398,7 @@ def _get_action_chunk(self, batch: dict[str, Tensor], noise: Tensor | None = Non
         original_action_dim = self.config.action_feature.shape[0]
         actions = actions[:, :, :original_action_dim]
 
-        actions = self.policy.unnormalize_outputs(
-            {ACTION: actions, ROBOT_TYPE: [self.policy.config.robot_type]}
-        )[ACTION]
+        actions = self.unnormalize_outputs({ACTION: actions, ROBOT_TYPE: [self.config.robot_type]})[ACTION]
 
         if self.config.adapt_to_pi_aloha:
             actions = self._pi_aloha_encode_actions(actions)

From 9a2fe6aa057e588bc1e97e297b5df6dfa534ba58 Mon Sep 17 00:00:00 2001
From: Francesco Capuano
Date: Thu, 26 Jun 2025 20:16:34 +0200
Subject: [PATCH 87/88] fix: smovla debug

---
 lerobot/common/policies/smolvla/modeling_smolvla.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/lerobot/common/policies/smolvla/modeling_smolvla.py b/lerobot/common/policies/smolvla/modeling_smolvla.py
index e0fe9e78ba..361999844f 100644
--- a/lerobot/common/policies/smolvla/modeling_smolvla.py
+++ b/lerobot/common/policies/smolvla/modeling_smolvla.py
@@ -63,7 +63,7 @@
 from torch import Tensor, nn
 from transformers import AutoProcessor
 
-from lerobot.common.constants import ACTION, OBS_STATE, ROBOT_TYPE
+from lerobot.common.constants import ACTION, OBS_STATE
 from lerobot.common.policies.normalize import (
     Normalize,
     Unnormalize,
@@ -398,7 +398,7 @@ def _get_action_chunk(self, batch: dict[str, Tensor], noise: Tensor | None = Non
         original_action_dim = self.config.action_feature.shape[0]
         actions = actions[:, :, :original_action_dim]
 
-        actions = self.unnormalize_outputs({ACTION: actions, ROBOT_TYPE: [self.config.robot_type]})[ACTION]
+        actions = self.unnormalize_outputs({ACTION: actions})[ACTION]
 
         if self.config.adapt_to_pi_aloha:
             actions = self._pi_aloha_encode_actions(actions)

From 9617c90558572b91ed0761257b2e4e65d5484a51 Mon Sep 17 00:00:00 2001
From: Francesco Capuano
Date: Thu, 26 Jun 2025 20:42:32 +0200
Subject: [PATCH 88/88] fix: reward classifier, maybe the last policy lacking?

---
 .../policies/sac/reward_model/modeling_classifier.py | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/lerobot/common/policies/sac/reward_model/modeling_classifier.py b/lerobot/common/policies/sac/reward_model/modeling_classifier.py
index f537e3aefd..7fec67f1a0 100644
--- a/lerobot/common/policies/sac/reward_model/modeling_classifier.py
+++ b/lerobot/common/policies/sac/reward_model/modeling_classifier.py
@@ -308,6 +308,13 @@ def select_action(self, batch: dict[str, Tensor]) -> Tensor:
         """
         raise NotImplementedError("Reward classifiers do not select actions")
 
+    def predict_action_chunk(self, batch: dict[str, Tensor]) -> Tensor:
+        """
+        This method is required by PreTrainedPolicy but not used for reward classifiers.
+        The reward classifier is not an actor and does not produce action chunks.
+        """
+        raise NotImplementedError("Reward classifiers do not predict action chunks")
+
     def reset(self):
         """
         This method is required by PreTrainedPolicy but not used for reward classifiers.
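Patch 88's stub keeps the reward classifier compatible with the policy interface the rest of this series assumes. A minimal sketch of that constraint follows; `PreTrainedPolicyLike` and `RewardClassifierLike` are illustrative stand-ins rather than the real class names, but `select_action` and `predict_action_chunk` mirror the methods touched by this patch.

    from abc import ABC, abstractmethod

    from torch import Tensor


    class PreTrainedPolicyLike(ABC):
        """Stand-in for the abstract policy base class."""

        @abstractmethod
        def select_action(self, batch: dict[str, Tensor]) -> Tensor: ...

        @abstractmethod
        def predict_action_chunk(self, batch: dict[str, Tensor]) -> Tensor: ...


    class RewardClassifierLike(PreTrainedPolicyLike):
        """A non-actor subclass must still define both methods to be instantiable."""

        def select_action(self, batch: dict[str, Tensor]) -> Tensor:
            raise NotImplementedError("Reward classifiers do not select actions")

        def predict_action_chunk(self, batch: dict[str, Tensor]) -> Tensor:
            raise NotImplementedError("Reward classifiers do not predict action chunks")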