From 61f33a86b071ce66c0b22b8914792d7612fa2ca6 Mon Sep 17 00:00:00 2001
From: Ben Zhang
Date: Tue, 3 Jun 2025 18:43:42 -0700
Subject: [PATCH 1/3] Fix smolvla loss not sent to wandb

---
 lerobot/common/policies/smolvla/modeling_smolvla.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/lerobot/common/policies/smolvla/modeling_smolvla.py b/lerobot/common/policies/smolvla/modeling_smolvla.py
index efdf602d46..c61bd38c46 100644
--- a/lerobot/common/policies/smolvla/modeling_smolvla.py
+++ b/lerobot/common/policies/smolvla/modeling_smolvla.py
@@ -324,21 +324,21 @@ def forward(self, batch: dict[str, Tensor], noise=None, time=None) -> dict[str,
         actions_is_pad = batch.get("actions_id_pad")
         loss_dict = {}
         losses = self.model.forward(images, img_masks, lang_tokens, lang_masks, state, actions, noise, time)
-        loss_dict["losses_after_forward"] = losses.clone()
+        loss_dict["losses_after_forward"] = losses.tolist()
 
         if actions_is_pad is not None:
             in_episode_bound = ~actions_is_pad
             losses = losses * in_episode_bound.unsqueeze(-1)
-            loss_dict["losses_after_in_ep_bound"] = losses.clone()
+            loss_dict["losses_after_in_ep_bound"] = losses.tolist()
 
         # Remove padding
         losses = losses[:, :, : self.config.max_action_dim]
-        loss_dict["losses_after_rm_padding"] = losses.clone()
+        loss_dict["losses_after_rm_padding"] = losses.tolist()
 
         # For backward pass
         loss = losses.mean()
         # For backward pass
-        loss_dict["loss"] = loss
+        loss_dict["loss"] = loss.item()
         return loss, loss_dict
 
     def prepare_images(self, batch):

From cfb0a6c8de64aae2d70e860aea3e1637c6d89449 Mon Sep 17 00:00:00 2001
From: Ben Zhang
Date: Tue, 3 Jun 2025 18:27:08 -0700
Subject: [PATCH 2/3] Print the type of unknown wandb logging metric

---
 lerobot/common/utils/wandb_utils.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lerobot/common/utils/wandb_utils.py b/lerobot/common/utils/wandb_utils.py
index 3fe241d412..9e938e1917 100644
--- a/lerobot/common/utils/wandb_utils.py
+++ b/lerobot/common/utils/wandb_utils.py
@@ -115,7 +115,7 @@ def log_dict(self, d: dict, step: int, mode: str = "train"):
         for k, v in d.items():
             if not isinstance(v, (int, float, str)):
                 logging.warning(
-                    f'WandB logging of key "{k}" was ignored as its type is not handled by this wrapper.'
+                    f'WandB logging of key "{k}" was ignored as its type "{type(v)}" is not handled by this wrapper.'
                 )
                 continue
             self._wandb.log({f"{mode}/{k}": v}, step=step)

From a9aede6c33c5dbb426ab574e44a1ae8be85f2c70 Mon Sep 17 00:00:00 2001
From: Ben Zhang
Date: Tue, 3 Jun 2025 19:10:19 -0700
Subject: [PATCH 3/3] Revert storing loss lists because lists can't be sent to wandb either

---
 lerobot/common/policies/smolvla/modeling_smolvla.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/lerobot/common/policies/smolvla/modeling_smolvla.py b/lerobot/common/policies/smolvla/modeling_smolvla.py
index c61bd38c46..008ba83806 100644
--- a/lerobot/common/policies/smolvla/modeling_smolvla.py
+++ b/lerobot/common/policies/smolvla/modeling_smolvla.py
@@ -324,16 +324,16 @@ def forward(self, batch: dict[str, Tensor], noise=None, time=None) -> dict[str,
         actions_is_pad = batch.get("actions_id_pad")
         loss_dict = {}
         losses = self.model.forward(images, img_masks, lang_tokens, lang_masks, state, actions, noise, time)
-        loss_dict["losses_after_forward"] = losses.tolist()
+        loss_dict["losses_after_forward"] = losses.clone()
 
         if actions_is_pad is not None:
             in_episode_bound = ~actions_is_pad
             losses = losses * in_episode_bound.unsqueeze(-1)
-            loss_dict["losses_after_in_ep_bound"] = losses.tolist()
+            loss_dict["losses_after_in_ep_bound"] = losses.clone()
 
         # Remove padding
         losses = losses[:, :, : self.config.max_action_dim]
-        loss_dict["losses_after_rm_padding"] = losses.tolist()
+        loss_dict["losses_after_rm_padding"] = losses.clone()
 
         # For backward pass
         loss = losses.mean()
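
A minimal caller-side sketch of how these loss_dict entries could be reduced to
types that WandBLogger.log_dict accepts, which (per PATCH 2/3) only forwards
int, float, and str values. The helper name to_wandb_scalars is hypothetical
and not part of lerobot; it assumes PyTorch tensors, as in the patches above.

    import torch

    def to_wandb_scalars(d: dict) -> dict:
        """Reduce tensor diagnostics to plain Python scalars for wandb."""
        out = {}
        for k, v in d.items():
            if isinstance(v, torch.Tensor):
                # Per-element tensors such as "losses_after_forward" become a
                # scalar mean; a 0-dim tensor keeps its value as a float.
                out[k] = v.detach().float().mean().item()
            elif isinstance(v, (int, float, str)):
                out[k] = v
            # Lists and other types are dropped here, since the wrapper
            # rejects them too (the motivation for PATCH 3/3).
        return out

Called as, e.g., wandb_logger.log_dict(to_wandb_scalars(loss_dict), step), this
keeps the cloned tensors from PATCH 3/3 available for debugging while logging
one scalar per diagnostic instead of triggering the warning added in PATCH 2/3.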