diff --git a/src/lerobot/policies/wall_x/__init__.py b/src/lerobot/policies/wall_x/__init__.py
index d80c27bda8..16fd2c8ab8 100644
--- a/src/lerobot/policies/wall_x/__init__.py
+++ b/src/lerobot/policies/wall_x/__init__.py
@@ -15,5 +15,7 @@
 # limitations under the License.
 
 from .configuration_wall_x import WallXConfig
+from .modeling_wall_x import WallXPolicy
+from .processor_wall_x import make_wall_x_pre_post_processors
 
 __all__ = ["WallXConfig", "WallXPolicy", "make_wall_x_pre_post_processors"]
diff --git a/src/lerobot/policies/wall_x/modeling_wall_x.py b/src/lerobot/policies/wall_x/modeling_wall_x.py
index ef99bad89d..c4fdabff75 100644
--- a/src/lerobot/policies/wall_x/modeling_wall_x.py
+++ b/src/lerobot/policies/wall_x/modeling_wall_x.py
@@ -43,7 +43,6 @@ import torch
 import torch.nn as nn
 import torch.nn.functional as F
-from peft import LoraConfig, get_peft_model
 from PIL import Image
 from qwen_vl_utils.vision_process import smart_resize
 from torch import Tensor
@@ -454,6 +453,8 @@ def define_action_token_id(self):
         }
 
     def add_lora(self, r=8, lora_alpha=32, target_modules=["q_proj", "v_proj"], lora_dropout=0.1):
+        from peft import LoraConfig, get_peft_model
+
         """
         Add LoRA (Low-Rank Adaptation) adapters to the model.