Commit

🐛 Fix some bugs
Skylark0924 committed Oct 14, 2024
1 parent 8c07767 commit 3031a95
Showing 91 changed files with 14,072 additions and 487 deletions.
@@ -8,7 +8,8 @@
import isaacgym
import argparse

from rofunc.config.utils import omegaconf_to_dict, get_config, load_view_motion_config
import rofunc as rf
from rofunc.config.utils import omegaconf_to_dict, get_config
from rofunc.learning.RofuncRL.tasks import Tasks
from rofunc.learning.RofuncRL.trainers import Trainers

@@ -63,7 +64,7 @@ def inference(custom_args):
# Available types of motion file path:
# 1. test data provided by rofunc: `examples/data/amp/*.npy`
# 2. custom motion file with absolute path
parser.add_argument("--motion_file", type=str, default="/home/ubuntu/Github/HOTU/hotu/data/hotu/010_amp.npy")
parser.add_argument("--motion_file", type=str, default=rf.oslab.get_rofunc_path('../examples/data/amp/amp_humanoid_backflip.npy'))
custom_args = parser.parse_args()

inference(custom_args)
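The new --motion_file default resolves the bundled AMP clip through rf.oslab.get_rofunc_path instead of a machine-specific absolute path. A minimal sketch of verifying the resolved default before launching inference, under the assumption that get_rofunc_path simply joins its argument onto the installed rofunc package directory:

import os

import rofunc as rf

motion_file = rf.oslab.get_rofunc_path('../examples/data/amp/amp_humanoid_backflip.npy')
assert os.path.isfile(motion_file), f"Motion clip not found at {motion_file}"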
@@ -136,16 +136,16 @@ def inference(custom_args):
parser.add_argument("--debug", type=str, default="False")
parser.add_argument("--headless", type=str, default="True")
parser.add_argument("--inference", action="store_false", help="turn to inference mode while adding this argument")
parser.add_argument("--ckpt_path", type=str, default="/home/ubuntu/Github/Xianova_Robotics/Rofunc-secret/examples/learning_rl/IsaacGym_RofuncRL/saved_runs/RofuncRL_HOTUTrainer_HumanoidHOTUGetup_HOTUBruce_24-05-28_13-51-39-584325_body_amp5/checkpoints/best_ckpt.pth")
parser.add_argument("--ckpt_path", type=str, default="../examples/learning_rl/IsaacGym_RofuncRL/saved_runs/RofuncRL_HOTUTrainer_HumanoidHOTUGetup_HOTUBruce_24-05-28_13-51-39-584325_body_amp5/checkpoints/best_ckpt.pth")

# HOTU
# parser.add_argument("--llc_ckpt_path", type=str, default="/home/ubuntu/Github/Xianova_Robotics/Rofunc-secret/examples/learning_rl/IsaacGym_RofuncRL/saved_runs/RofuncRL_HOTUTrainer_HumanoidHOTUGetup_HOTUHumanoidWQbhandNew_24-05-26_21-16-24-361269_body_amp5/checkpoints/best_ckpt.pth")
# parser.add_argument("--llc_ckpt_path", type=str, default="../examples/learning_rl/IsaacGym_RofuncRL/saved_runs/RofuncRL_HOTUTrainer_HumanoidHOTUGetup_HOTUHumanoidWQbhandNew_24-05-26_21-16-24-361269_body_amp5/checkpoints/best_ckpt.pth")
# ZJU
# parser.add_argument("--llc_ckpt_path", type=str, default="/home/ubuntu/Github/Xianova_Robotics/Rofunc-secret/examples/learning_rl/IsaacGym_RofuncRL/saved_runs/RofuncRL_HOTUTrainer_HumanoidHOTUGetup_HOTUZJUHumanoidWQbhandNew_24-05-26_18-57-20-244370_body_amp5/checkpoints/best_ckpt.pth")
# parser.add_argument("--llc_ckpt_path", type=str, default="../examples/learning_rl/IsaacGym_RofuncRL/saved_runs/RofuncRL_HOTUTrainer_HumanoidHOTUGetup_HOTUZJUHumanoidWQbhandNew_24-05-26_18-57-20-244370_body_amp5/checkpoints/best_ckpt.pth")
# H1
# parser.add_argument("--llc_ckpt_path", type=str, default="/home/ubuntu/Github/Xianova_Robotics/Rofunc-secret/examples/learning_rl/IsaacGym_RofuncRL/saved_runs/RofuncRL_HOTUTrainer_HumanoidHOTUGetup_HOTUH1WQbhandNew_24-05-27_16-59-15-598225_body_amp5/checkpoints/best_ckpt.pth")
# parser.add_argument("--llc_ckpt_path", type=str, default="../examples/learning_rl/IsaacGym_RofuncRL/saved_runs/RofuncRL_HOTUTrainer_HumanoidHOTUGetup_HOTUH1WQbhandNew_24-05-27_16-59-15-598225_body_amp5/checkpoints/best_ckpt.pth")
# Bruce
parser.add_argument("--llc_ckpt_path", type=str, default="/home/ubuntu/Github/Xianova_Robotics/Rofunc-secret/examples/learning_rl/IsaacGym_RofuncRL/saved_runs/RofuncRL_HOTUTrainer_HumanoidHOTUGetup_HOTUBruce_24-05-28_13-51-39-584325_body_amp5/checkpoints/best_ckpt.pth")
parser.add_argument("--llc_ckpt_path", type=str, default="../examples/learning_rl/IsaacGym_RofuncRL/saved_runs/RofuncRL_HOTUTrainer_HumanoidHOTUGetup_HOTUBruce_24-05-28_13-51-39-584325_body_amp5/checkpoints/best_ckpt.pth")

custom_args = parser.parse_args()

@@ -103,7 +103,7 @@ def inference(custom_args):
parser.add_argument("--rl_device", type=int, default=1)
parser.add_argument("--headless", type=str, default="True")
parser.add_argument("--inference", action="store_true", help="turn to inference mode while adding this argument")
parser.add_argument("--ckpt_path", type=str, default="/home/ubuntu/Github/Xianova_Robotics/Rofunc-secret/examples/learning_rl/IsaacGym_RofuncRL/runs/RofuncRL_PhysHOITrainer_HumanoidPhysHOI_24-04-23_18-21-03-579079/checkpoints/best_ckpt.pth")
parser.add_argument("--ckpt_path", type=str, default="../examples/learning_rl/IsaacGym_RofuncRL/runs/RofuncRL_PhysHOITrainer_HumanoidPhysHOI_24-04-23_18-21-03-579079/checkpoints/best_ckpt.pth")
parser.add_argument("--debug", type=str, default="False")
custom_args = parser.parse_args()

105 changes: 105 additions & 0 deletions examples/learning_rl/IsaacLab_RofuncRL/example_isaaclab_env.py
@@ -0,0 +1,105 @@
"""
Ant (RofuncRL)
===========================
Ant RL using RofuncRL
"""

import sys,os

sys.path.append("/home/ubuntu/Github/Rofunc")

import argparse

from rofunc.config.utils import omegaconf_to_dict, get_config
from rofunc.learning.RofuncRL.tasks import Tasks
from rofunc.learning.RofuncRL.trainers import Trainers
from rofunc.learning.pre_trained_models.download import model_zoo
from rofunc.learning.utils.utils import set_seed
from rofunc.learning.RofuncRL.tasks.utils.env_loaders import load_isaaclab_env
from rofunc.learning.utils.env_wrappers import wrap_env


def train(custom_args):
# Config task and trainer parameters for Isaac Gym environments
custom_args.num_envs = 64 if custom_args.agent.upper() in ["SAC", "TD3"] else custom_args.num_envs

args_overrides = ["task={}".format(custom_args.task),
"train={}{}RofuncRL".format(custom_args.task, custom_args.agent.upper()),
"device_id={}".format(custom_args.sim_device),
"rl_device=cuda:{}".format(custom_args.rl_device),
"headless={}".format(custom_args.headless),
"num_envs={}".format(custom_args.num_envs)]
cfg = get_config('./learning/rl', 'config', args=args_overrides)
cfg_dict = omegaconf_to_dict(cfg.task)

set_seed(cfg.train.Trainer.seed)

# Instantiate the Isaac Gym environment
env = load_isaaclab_env(task_name="Isaac-Cartpole-v0", headless=True, num_envs=custom_args.num_envs)

# Instantiate the RL trainer
trainer = Trainers().trainer_map[custom_args.agent](cfg=cfg,
env=env,
device=cfg.rl_device,
env_name=custom_args.task)

# Start training
trainer.train()


def inference(custom_args):
# Config task and trainer parameters for Isaac Gym environments
args_overrides = ["task={}".format(custom_args.task),
"train={}{}RofuncRL".format(custom_args.task, custom_args.agent.upper()),
"device_id={}".format(custom_args.sim_device),
"rl_device=cuda:{}".format(custom_args.rl_device),
"headless={}".format(False),
"num_envs={}".format(16)]
cfg = get_config('./learning/rl', 'config', args=args_overrides)
cfg_dict = omegaconf_to_dict(cfg.task)

set_seed(cfg.train.Trainer.seed)

# Instantiate the Isaac Gym environment
infer_env = Tasks().task_map[custom_args.task](cfg=cfg_dict,
rl_device=cfg.rl_device,
sim_device=f'cuda:{cfg.device_id}',
graphics_device_id=cfg.device_id,
headless=cfg.headless,
virtual_screen_capture=cfg.capture_video, # TODO: check
force_render=cfg.force_render)

# Instantiate the RL trainer
trainer = Trainers().trainer_map[custom_args.agent](cfg=cfg,
env=infer_env,
device=cfg.rl_device,
env_name=custom_args.task,
inference=True)
# load checkpoint
if custom_args.ckpt_path is None:
custom_args.ckpt_path = model_zoo(name="AntRofuncRLPPO.pth")
trainer.agent.load_ckpt(custom_args.ckpt_path)

# Start inference
trainer.inference()


if __name__ == '__main__':
gpu_id = 0

parser = argparse.ArgumentParser()
parser.add_argument("--task", type=str, default="Cartpole")
parser.add_argument("--agent", type=str, default="ppo") # Available agents: ppo, sac, td3, a2c
parser.add_argument("--num_envs", type=int, default=4096)
parser.add_argument("--sim_device", type=int, default=0)
parser.add_argument("--rl_device", type=int, default=gpu_id)
parser.add_argument("--headless", type=str, default="True")
parser.add_argument("--inference", action="store_true", help="turn to inference mode while adding this argument")
parser.add_argument("--ckpt_path", type=str, default=None)
custom_args = parser.parse_args()

if not custom_args.inference:
train(custom_args)
else:
inference(custom_args)
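Because the new example is driven entirely by argparse, it can also be launched programmatically. A sketch assuming the module is importable and using only the flags defined in the parser above:

from argparse import Namespace

# Hypothetical programmatic launch mirroring the CLI defaults shown above.
custom_args = Namespace(task="Cartpole", agent="ppo", num_envs=4096,
                        sim_device=0, rl_device=0, headless="True",
                        inference=False, ckpt_path=None)
train(custom_args)  # switch to inference(custom_args) once a ckpt_path is set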
13 changes: 11 additions & 2 deletions examples/robolab/example_coordinate_transform.py
@@ -7,6 +7,7 @@

import rofunc as rf

# Quaternion convert
quat = [0.234, 0.23, 0.4, 1.3]

mat = rf.robolab.convert_ori_format(quat, "quat", "mat")
@@ -28,7 +29,7 @@
# [-0.2098, 0.4048, 0.8900, 1.0000],
# [ 0.0000, 0.0000, 0.0000, 1.0000]]])


# Rotation matrix convert
rot = [[0.7825, -0.4763, 0.4011],
[0.5862, 0.7806, -0.2168],
[-0.2098, 0.4048, 0.8900]]
@@ -44,7 +45,7 @@
# [Rofunc:INFO] Quaternion: tensor([[0.1673, 0.1644, 0.2859, 0.9291]])
# [Rofunc:INFO] Euler angles: tensor([[0.4269, 0.2114, 0.6429]])


# Euler convert
euler = [0.4268, 0.2114, 0.6430]
quat = rf.robolab.convert_ori_format(euler, "euler", "quat")
mat = rf.robolab.convert_ori_format(euler, "euler", "mat")
@@ -57,3 +58,11 @@
# tensor([[[ 0.7825, -0.4763, 0.4011],
# [ 0.5863, 0.7806, -0.2168],
# [-0.2098, 0.4047, 0.8900]]])

# Quaternion multiplication
quat1 = [[-0.436865, 0.49775, 0.054428, 0.747283], [0, 0, 1, 0]]
quat2 = [0.707, 0, 0, 0.707]
quat3 = rf.robolab.quat_multiply(quat1, quat2)
rf.logger.beauty_print(f"Result: {rf.robolab.check_quat_tensor(quat3)}")
# [Rofunc:INFO] Result: tensor([[ 0.2195, 0.3904, -0.3135, 0.8373],
# [ 0.0000, 0.7071, 0.7071, 0.0000]])
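The printed result is consistent with the Hamilton product applied in (x, y, z, w) order; a minimal scalar sketch for one quaternion pair (quat_multiply itself presumably broadcasts this over batches):

def quat_mul_xyzw(q1, q2):
    # Hamilton product q1 * q2 with quaternions stored as (x, y, z, w)
    x1, y1, z1, w1 = q1
    x2, y2, z2, w2 = q2
    return [w1 * x2 + w2 * x1 + y1 * z2 - z1 * y2,
            w1 * y2 + w2 * y1 + z1 * x2 - x1 * z2,
            w1 * z2 + w2 * z1 + x1 * y2 - y1 * x2,
            w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2]

print(quat_mul_xyzw([0, 0, 1, 0], [0.707, 0, 0, 0.707]))
# -> approximately [0.0, 0.7071, 0.7071, 0.0], matching the second row above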
72 changes: 72 additions & 0 deletions examples/robolab/example_forward_dynamics.py
@@ -0,0 +1,72 @@
"""
FD from models
========================
Forward dynamics from URDF or MuJoCo XML files.
"""

import pprint
import math

import rofunc as rf

rf.logger.beauty_print("########## Forward kinematics from URDF or MuJoCo XML files with RobotModel class ##########")
rf.logger.beauty_print("---------- Forward kinematics for Franka Panda using URDF file ----------")
model_path = "../../rofunc/simulator/assets/urdf/franka_description/robots/franka_panda.urdf"

joint_value = [[0.0, -math.pi / 4.0, 0.0, math.pi / 2.0, 0.0, math.pi / 4.0, 0.0, 0.0, 0.0],
[0.0, -math.pi / 4.0, 0.0, math.pi / 2.0, 0.0, math.pi / 4.0, 0.0, 0.0, 0.0]]
export_link = "panda_hand"

# # Build the robot model with kinpy
# # Deprecated: kinpy is not supported anymore, just for checking the results!!!! Please use pytorch_kinematics instead.
# robot = rf.robolab.RobotModel(model_path, solve_engine="kinpy", verbose=False)
# # Show the robot chain and joint names, can also be done by verbose=True
# robot.show_chain()
# # Get the forward kinematics of export_link
# pos, rot, ret = robot.get_fk(joint_value, export_link)
#
# # Convert the orientation representation and print the results
# rot = rf.robolab.convert_quat_order(rot, "wxyz", "xyzw")
# rf.logger.beauty_print(f"Position of {export_link}: {pos}")
# rf.logger.beauty_print(f"Rotation of {export_link}: {rot}")
# pprint.pprint(ret, width=1)

# Try the same thing with pytorch_kinematics
robot = rf.robolab.RobotModel(model_path, solve_engine="pytorch_kinematics", verbose=False)
pos, rot, ret = robot.get_fk(joint_value, export_link)
rf.logger.beauty_print(f"Position of {export_link}: {pos}")
rf.logger.beauty_print(f"Rotation of {export_link}: {rot}")
pprint.pprint(ret, width=1)

# rf.logger.beauty_print("---------- Forward kinematics for Bruce Humanoid Robot using MJCF file ----------")
model_path = "../../rofunc/simulator/assets/mjcf/bruce/bruce.xml"
joint_value = [0.0 for _ in range(16)]

export_link = "elbow_pitch_link_r"

# # Build the robot model with pytorch_kinematics, kinpy is not supported for MJCF files
robot = rf.robolab.RobotModel(model_path, solve_engine="pytorch_kinematics", verbose=True)
# Get the forward kinematics of export_link
pos, rot, ret = robot.get_fk(joint_value, export_link)
#
# # Print the results
# rf.logger.beauty_print(f"Position of {export_link}: {pos}")
# rf.logger.beauty_print(f"Rotation of {export_link}: {rot}")
# pprint.pprint(ret, width=1)


model_path = "../../rofunc/simulator/assets/mjcf/hotu/hotu_humanoid.xml"
joint_value = [0.1 for _ in range(34)]

export_link = "left_hand_link_2"

# # Build the robot model with pytorch_kinematics, kinpy is not supported for MJCF files
robot = rf.robolab.RobotModel(model_path, solve_engine="pytorch_kinematics", verbose=True)
# Get the forward kinematics of export_link
pos, rot, ret = robot.get_fk(joint_value, export_link)

# # Print the results
rf.logger.beauty_print(f"Position of {export_link}: {pos}")
rf.logger.beauty_print(f"Rotation of {export_link}: {rot}")
pprint.pprint(ret, width=1)
58 changes: 28 additions & 30 deletions examples/robolab/example_forward_kinematics.py
@@ -11,36 +11,36 @@

import rofunc as rf

# rf.logger.beauty_print("########## Forward kinematics from URDF or MuJoCo XML files with RobotModel class ##########")
# rf.logger.beauty_print("---------- Forward kinematics for Franka Panda using URDF file ----------")
# model_path = "../../rofunc/simulator/assets/urdf/franka_description/robots/franka_panda.urdf"
#
# joint_value = [[0.0, -math.pi / 4.0, 0.0, math.pi / 2.0, 0.0, math.pi / 4.0, 0.0, 0.0, 0.0],
# [0.0, -math.pi / 4.0, 0.0, math.pi / 2.0, 0.0, math.pi / 4.0, 0.0, 0.0, 0.0]]
# export_link = "panda_hand"
#
# # # Build the robot model with kinpy
# # # Deprecated: kinpy is not supported anymore, just for checking the results!!!! Please use pytorch_kinematics instead.
# # robot = rf.robolab.RobotModel(model_path, solve_engine="kinpy", verbose=False)
# # # Show the robot chain and joint names, can also be done by verbose=True
# # robot.show_chain()
# # # Get the forward kinematics of export_link
# # pos, rot, ret = robot.get_fk(joint_value, export_link)
# #
# # # Convert the orientation representation and print the results
# # rot = rf.robolab.convert_quat_order(rot, "wxyz", "xyzw")
# # rf.logger.beauty_print(f"Position of {export_link}: {pos}")
# # rf.logger.beauty_print(f"Rotation of {export_link}: {rot}")
# # pprint.pprint(ret, width=1)
#
# # Try the same thing with pytorch_kinematics
# robot = rf.robolab.RobotModel(model_path, solve_engine="pytorch_kinematics", verbose=False)
rf.logger.beauty_print("########## Forward kinematics from URDF or MuJoCo XML files with RobotModel class ##########")
rf.logger.beauty_print("---------- Forward kinematics for Franka Panda using URDF file ----------")
model_path = "../../rofunc/simulator/assets/urdf/franka_description/robots/franka_panda.urdf"

joint_value = [[0.0, -math.pi / 4.0, 0.0, math.pi / 2.0, 0.0, math.pi / 4.0, 0.0, 0.0, 0.0],
[0.0, -math.pi / 4.0, 0.0, math.pi / 2.0, 0.0, math.pi / 4.0, 0.0, 0.0, 0.0]]
export_link = "panda_hand"

# # Build the robot model with kinpy
# # Deprecated: kinpy is not supported anymore, just for checking the results!!!! Please use pytorch_kinematics instead.
# robot = rf.robolab.RobotModel(model_path, solve_engine="kinpy", verbose=False)
# # Show the robot chain and joint names, can also be done by verbose=True
# robot.show_chain()
# # Get the forward kinematics of export_link
# pos, rot, ret = robot.get_fk(joint_value, export_link)
#
# # Convert the orientation representation and print the results
# rot = rf.robolab.convert_quat_order(rot, "wxyz", "xyzw")
# rf.logger.beauty_print(f"Position of {export_link}: {pos}")
# rf.logger.beauty_print(f"Rotation of {export_link}: {rot}")
# pprint.pprint(ret, width=1)
#
# rf.logger.beauty_print("---------- Forward kinematics for Bruce Humanoid Robot using MJCF file ----------")

# Try the same thing with pytorch_kinematics
robot = rf.robolab.RobotModel(model_path, solve_engine="pytorch_kinematics", verbose=False)
pos, rot, ret = robot.get_fk(joint_value, export_link)
rf.logger.beauty_print(f"Position of {export_link}: {pos}")
rf.logger.beauty_print(f"Rotation of {export_link}: {rot}")
pprint.pprint(ret, width=1)

rf.logger.beauty_print("---------- Forward kinematics for Bruce Humanoid Robot using MJCF file ----------")
model_path = "../../rofunc/simulator/assets/mjcf/bruce/bruce.xml"
joint_value = [0.0 for _ in range(16)]

@@ -56,13 +56,11 @@
# rf.logger.beauty_print(f"Rotation of {export_link}: {rot}")
# pprint.pprint(ret, width=1)




rf.logger.beauty_print("---------- Forward kinematics for United Digital Human (UDH) using MJCF file ----------")
model_path = "../../rofunc/simulator/assets/mjcf/hotu/hotu_humanoid.xml"
joint_value = [0.1 for _ in range(34)]

export_link = "left_hand_link_2"
export_link = "left_hand_2"

# # Build the robot model with pytorch_kinematics, kinpy is not supported for MJCF files
robot = rf.robolab.RobotModel(model_path, solve_engine="pytorch_kinematics", verbose=True)
19 changes: 11 additions & 8 deletions examples/robolab/example_inverse_kinematics.py
@@ -5,18 +5,21 @@
Inverse kinematics from URDF or MuJoCo XML files.
"""

import pprint

import math
import torch
import rofunc as rf

rf.logger.beauty_print("########## Inverse kinematics from URDF or MuJoCo XML files with RobotModel class ##########")
rf.logger.beauty_print("---------- Inverse kinematics for Franka Panda using URDF file ----------")
model_path = "/home/ubuntu/Github/Xianova_Robotics/Rofunc-secret/rofunc/simulator/assets/urdf/franka_description/robots/franka_panda.urdf"
model_path = "../../rofunc/simulator/assets/urdf/franka_description/robots/franka_panda.urdf"

ee_pose = [0, 0, 0, 0, 0, 0, 1]
export_link = "panda_hand_frame"
robot = rf.robolab.RobotModel(model_path, solve_engine="kinpy", verbose=True)
export_link = "panda_hand"
robot = rf.robolab.RobotModel(model_path, solve_engine="pytorch_kinematics", verbose=True)

# Get ik solution
ret = robot.get_ik(ee_pose, export_link)
print(ret)
print(ret.solutions)

# Get ik solution near the current configuration
cur_configs = [[-1.7613, 2.7469, -3.5611, -3.8847, 2.7940, 1.9055, 1.9879]]
ret = robot.get_ik(ee_pose, export_link, cur_configs=cur_configs)
print(ret.solutions)
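A quick sanity check for the updated example is to push an IK solution back through forward kinematics and compare against the target pose; a sketch assuming each entry of ret.solutions is a joint configuration in the format accepted by get_fk:

# Hypothetical round-trip check: FK of the first IK solution should reproduce ee_pose.
q_sol = ret.solutions[0]
pos, rot, _ = robot.get_fk(q_sol, export_link)
rf.logger.beauty_print(f"FK of first IK solution: position {pos}, rotation {rot}")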
0 comments on commit 3031a95