Commit a3cdfeb (parent 44db4c7): 4 changed files with 146 additions and 4 deletions.
@@ -0,0 +1,112 @@
from collections import OrderedDict
from typing import Any, Dict

import numpy as np
import sapien
import torch

from mani_skill2.agents.robots.fetch.fetch import Fetch
from mani_skill2.agents.robots.panda.panda import Panda
from mani_skill2.envs.sapien_env import BaseEnv
from mani_skill2.sensors.camera import CameraConfig
from mani_skill2.utils.building.ground import build_ground
from mani_skill2.utils.registration import register_env
from mani_skill2.utils.sapien_utils import look_at
from mani_skill2.utils.structs.types import GPUMemoryConfig, SimConfig


@register_env("Empty-v1", max_episode_steps=200000)
class EmptyEnv(BaseEnv):
    """
    Task Description
    ----------------
    This is just a dummy environment for showcasing robots in an empty scene.

    Randomizations
    --------------
    None

    Success Conditions
    ------------------
    None

    Visualization: link to a video/gif of the task being solved
    """

    SUPPORTED_ROBOTS = ["panda", "fetch", "xmate3_robotiq", "anymal"]
    # agent: Union[Panda, Fetch]

    def __init__(self, *args, robot_uids="panda", **kwargs):
        super().__init__(*args, robot_uids=robot_uids, **kwargs)

    def _register_sensors(self):
        # A single low-resolution camera pointed at the robot.
        pose = look_at(eye=[0.3, 0, 0.6], target=[-0.1, 0, 0.1])
        return [
            CameraConfig("base_camera", pose.p, pose.q, 128, 128, np.pi / 2, 0.01, 10)
        ]

    def _register_human_render_cameras(self):
        # High-resolution camera used for human rendering / video recording.
        pose = look_at([0.75, -0.75, 0.5], [0.0, 0.0, 0.2])
        return CameraConfig("render_camera", pose.p, pose.q, 2048, 2048, 1, 0.01, 10)

    def _load_actors(self):
        # The scene contains nothing but a ground plane.
        build_ground(self._scene)

    def _initialize_actors(self, env_idx: torch.Tensor):
        # Set a reasonable rest configuration for each supported robot.
        if self.robot_uids == "panda":
            qpos = np.array(
                [
                    0.0,
                    0.0,
                    0,
                    -np.pi * 6 / 8,
                    0,
                    np.pi * 3 / 4,
                    np.pi / 4,
                    0.04,
                    0.04,
                ]
            )
            self.agent.robot.set_qpos(qpos)
        elif self.robot_uids == "xmate3_robotiq":
            qpos = np.array(
                [0, np.pi / 6, 0, np.pi / 3, 0, np.pi / 2, -np.pi / 2, 0, 0]
            )
            self.agent.robot.set_qpos(qpos)
            self.agent.robot.set_pose(sapien.Pose([-0.562, 0, 0]))
        elif self.robot_uids == "fetch":
            qpos = np.array(
                [
                    0,
                    0,
                    0,
                    0.04,
                    0,
                    0,
                    0,
                    -np.pi / 4,
                    0,
                    np.pi / 4,
                    0,
                    np.pi / 3,
                    0,
                    0.015,
                    0.015,
                ]
            )
            self.agent.robot.set_qpos(qpos)

    def evaluate(self):
        # No success conditions; this environment is for visualization only.
        return {}

    def _get_obs_extra(self, info: Dict):
        return OrderedDict()

    def compute_dense_reward(self, obs: Any, action: torch.Tensor, info: Dict):
        # Dummy reward: always zero.
        return torch.zeros(self.num_envs, device=self.device)

    def compute_normalized_dense_reward(
        self, obs: Any, action: torch.Tensor, info: Dict
    ):
        max_reward = 1.0
        return self.compute_dense_reward(obs=obs, action=action, info=info) / max_reward
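
For a quick sanity check outside the interactive demo below, a minimal headless rollout might look like the following sketch. It assumes only the standard Gymnasium API and the "Empty-v1" ID registered above; the episode length and random actions are purely illustrative, not part of this commit.

# Sketch: headless rollout of the registered Empty-v1 environment (assumed usage).
import gymnasium as gym

import mani_skill2.envs  # importing registers the environments

env = gym.make("Empty-v1", robot_uids="panda")
obs, info = env.reset(seed=0)
for _ in range(10):
    # Rewards are always zero in this dummy environment.
    obs, reward, terminated, truncated, info = env.step(env.action_space.sample())
env.close()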
@@ -0,0 +1,28 @@
import sys

import gymnasium as gym

import mani_skill2.envs
from mani_skill2.envs.sapien_env import BaseEnv

if __name__ == "__main__":
    # Robot to showcase, e.g. one of ["panda", "fetch", "xmate3_robotiq"].
    robot = sys.argv[1]
    env = gym.make(
        "Empty-v1",
        enable_shadow=True,
        robot_uids=robot,
        render_mode="human",
        control_mode=None,
        shader_dir="rt-fast",
    )
    env: BaseEnv = env.unwrapped
    viewer = env.render()
    viewer.paused = True

    while True:
        viewer = env.render()
        # if viewer.window.key_press("n"):
        #     env.close()
        #     del env
        #     break
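
The demo script takes the robot id as its only command-line argument, so it would presumably be launched as, for example, python path/to/this_script.py panda (the script's actual path is not shown in this diff). The viewer opens paused, and the render loop runs until the process is interrupted.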