Skip to content

Commit

Permalink
work (#216)
Browse files Browse the repository at this point in the history
  • Loading branch information
StoneT2000 authored and yzqin committed Feb 29, 2024
1 parent 44db4c7 commit a3cdfeb
Show file tree
Hide file tree
Showing 4 changed files with 146 additions and 4 deletions.
7 changes: 4 additions & 3 deletions mani_skill2/envs/sapien_env.py
Original file line number Diff line number Diff line change
Expand Up @@ -961,9 +961,10 @@ def render_human(self):
if self._viewer is None:
self._viewer = Viewer()
self._setup_viewer()
self._viewer.set_camera_pose(
self._human_render_cameras["render_camera"].camera.global_pose
)
if "render_camera" in self._human_render_cameras:
self._viewer.set_camera_pose(
self._human_render_cameras["render_camera"].camera.global_pose
)
for obj in self._hidden_objects:
obj.show_visual()
if physx.is_gpu_enabled() and self._scene._gpu_sim_initialized:
Expand Down
3 changes: 2 additions & 1 deletion mani_skill2/envs/tasks/__init__.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
from .dexterity import RotateValveEnv
from .empty_env import EmptyEnv
from .fmb.fmb import FMBAssembly1Env
from .open_cabinet_drawer import OpenCabinetDoorEnv, OpenCabinetDrawerEnv
from .pick_cube import PickCubeEnv
Expand All @@ -8,4 +10,3 @@
from .stack_cube import StackCubeEnv
from .two_robot_pick_cube import TwoRobotPickCube
from .two_robot_stack_cube import TwoRobotStackCube
from .dexterity import RotateValveEnv
112 changes: 112 additions & 0 deletions mani_skill2/envs/tasks/empty_env.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,112 @@
from collections import OrderedDict
from typing import Any, Dict

import numpy as np
import sapien
import torch

from mani_skill2.agents.robots.fetch.fetch import Fetch
from mani_skill2.agents.robots.panda.panda import Panda
from mani_skill2.envs.sapien_env import BaseEnv
from mani_skill2.sensors.camera import CameraConfig
from mani_skill2.utils.building.ground import build_ground
from mani_skill2.utils.registration import register_env
from mani_skill2.utils.sapien_utils import look_at
from mani_skill2.utils.structs.types import GPUMemoryConfig, SimConfig


@register_env("Empty-v1", max_episode_steps=200000)
class EmptyEnv(BaseEnv):
    """A minimal no-task environment used to showcase robots in an empty scene.

    There are no randomizations, no success conditions, and the dense reward
    is always zero; the environment exists purely for visualization/debugging
    of robot models on a flat ground plane.
    """

    # Robot uids this environment knows an initial pose for (plus "anymal",
    # which falls through to the agent's default configuration).
    SUPPORTED_ROBOTS = ["panda", "fetch", "xmate3_robotiq", "anymal"]

    def __init__(self, *args, robot_uids="panda", **kwargs):
        super().__init__(*args, robot_uids=robot_uids, **kwargs)

    def _register_sensors(self):
        # Low-resolution observation camera looking down at the workspace origin.
        cam_pose = look_at(eye=[0.3, 0, 0.6], target=[-0.1, 0, 0.1])
        return [
            CameraConfig(
                "base_camera", cam_pose.p, cam_pose.q, 128, 128, np.pi / 2, 0.01, 10
            )
        ]

    def _register_human_render_cameras(self):
        # High-resolution camera used for human rendering / videos.
        cam_pose = look_at([0.75, -0.75, 0.5], [0.0, 0.0, 0.2])
        return CameraConfig("render_camera", cam_pose.p, cam_pose.q, 2048, 2048, 1, 0.01, 10)

    def _load_actors(self):
        # Only a ground plane — the scene is otherwise empty by design.
        build_ground(self._scene)

    def _initialize_actors(self, env_idx: torch.Tensor):
        """Place the selected robot in a canonical rest configuration."""
        if self.robot_uids == "panda":
            rest_qpos = [
                0.0, 0.0, 0, -np.pi * 6 / 8, 0, np.pi * 3 / 4, np.pi / 4, 0.04, 0.04,
            ]
            self.agent.robot.set_qpos(np.array(rest_qpos))
        elif self.robot_uids == "xmate3_robotiq":
            rest_qpos = [0, np.pi / 6, 0, np.pi / 3, 0, np.pi / 2, -np.pi / 2, 0, 0]
            self.agent.robot.set_qpos(np.array(rest_qpos))
            # Shift the base back so the arm is centered over the origin.
            self.agent.robot.set_pose(sapien.Pose([-0.562, 0, 0]))
        elif self.robot_uids == "fetch":
            rest_qpos = [
                0, 0, 0, 0.04, 0, 0, 0,
                -np.pi / 4, 0, np.pi / 4, 0, np.pi / 3, 0,
                0.015, 0.015,
            ]
            self.agent.robot.set_qpos(np.array(rest_qpos))

    def evaluate(self):
        # No task, hence nothing to evaluate.
        return {}

    def _get_obs_extra(self, info: Dict):
        # No extra task-specific observations.
        return OrderedDict()

    def compute_dense_reward(self, obs: Any, action: torch.Tensor, info: Dict):
        # Reward is identically zero for every environment instance.
        return torch.zeros(self.num_envs, device=self.device)

    def compute_normalized_dense_reward(
        self, obs: Any, action: torch.Tensor, info: Dict
    ):
        max_reward = 1.0
        return self.compute_dense_reward(obs=obs, action=action, info=info) / max_reward
28 changes: 28 additions & 0 deletions manualtest/robot_showcase.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
import sys

import gymnasium as gym

import mani_skill2.envs
from mani_skill2.envs.sapien_env import BaseEnv

if __name__ == "__main__":
    # First CLI argument selects the robot to showcase,
    # e.g. "panda", "fetch", or "xmate3_robotiq".
    robot_uid = sys.argv[1]
    env = gym.make(
        "Empty-v1",
        enable_shadow=True,
        robot_uids=robot_uid,
        render_mode="human",
        control_mode=None,
        shader_dir="rt-fast",
    )
    env: BaseEnv = env.unwrapped

    # Open the interactive viewer, starting paused so the user can inspect
    # the robot before stepping anything.
    viewer = env.render()
    viewer.paused = True

    # Re-render forever; terminate by closing the viewer window / Ctrl-C.
    while True:
        viewer = env.render()

0 comments on commit a3cdfeb

Please sign in to comment.