Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Added Visualization Labels and static publishers for images #211

Open
wants to merge 16 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 4 additions & 1 deletion spot_rl_experiments/configs/ros_topic_names.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,10 @@ HEAD_DEPTH: "/raw_head_depth"
HAND_DEPTH: "/raw_hand_depth"
HAND_DEPTH_UNSCALED: "/raw_hand_depth_unscaled"
HAND_RGB: "/hand_rgb"

IRS_RGB: "/irs_rgb"
IRS_DEPTH: "/irs_depth"
GRIPPER_RGB: "/gripper_rgb"
GRIPPER_DEPTH: "/gripper_depth"
COMPRESSED_IMAGES: "/compressed_images"

FILTERED_HEAD_DEPTH: "/filtered_head_depth"
Expand Down
65 changes: 64 additions & 1 deletion spot_rl_experiments/spot_rl/ros_img_vis.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,12 +17,15 @@
from spot_rl.utils.utils import ros_topics as rt
from spot_wrapper.utils import resize_to_tallest

RAW_IMG_TOPICS = [rt.HEAD_DEPTH, rt.HAND_DEPTH, rt.HAND_RGB]
# from intel_realsense_payload_for_spotsim2real.IntelRealSenseCameraInterface import IntelRealSenseCameraInterface
Achuthankrishna marked this conversation as resolved.
Show resolved Hide resolved

RAW_IMG_TOPICS = [rt.HEAD_DEPTH, rt.GRIPPER_DEPTH, rt.GRIPPER_RGB, rt.IRS_RGB]

PROCESSED_IMG_TOPICS = [
rt.FILTERED_HEAD_DEPTH,
rt.FILTERED_HAND_DEPTH,
rt.MASK_RCNN_VIZ_TOPIC,
rt.IRS_DEPTH,
]

FOUR_CC = cv2.VideoWriter_fourcc(*"MP4V")
Expand Down Expand Up @@ -176,6 +179,7 @@ def generate_composite(self):
return None

refreshed_topics = [k for k, v in self.updated.items() if v]
print("Available topics in self.msgs:", self.last_seen)
Achuthankrishna marked this conversation as resolved.
Show resolved Hide resolved

# Gather latest images
raw_msgs = [self.msgs[i] for i in RAW_IMG_TOPICS]
Expand All @@ -189,13 +193,30 @@ def generate_composite(self):
for idx, raw_msg in enumerate(raw_msgs):
if processed_msgs[idx] is not None:
processed_imgs.append(self.msg_to_cv2(processed_msgs[idx]))
print(processed_imgs[idx].shape)
Achuthankrishna marked this conversation as resolved.
Show resolved Hide resolved

elif processed_msgs[idx] is None and raw_msg is not None:
processed_imgs.append(np.zeros_like(raw_imgs[idx]))

# Crop gripper images
if raw_msgs[1] is not None:
for imgs in [raw_imgs, processed_imgs]:
imgs[1] = imgs[1][:, 124:-60]
# Processing Raw depth
raw_imgs[1] = cv2.convertScaleAbs(raw_imgs[1], alpha=0.03)
# Resizing Mask RCNN image to align width
processed_imgs[2] = cv2.resize(processed_imgs[2], (640, 480))
# Processing Raw depth
processed_imgs[3] = cv2.convertScaleAbs(processed_imgs[3], alpha=0.03)
# Add Strips and Labels
raw_imgs = [
self.overlay_topic_text(img, topic)
for img, topic in zip(raw_imgs, RAW_IMG_TOPICS)
]
processed_imgs = [
self.overlay_topic_text(img, topic)
for img, topic in zip(processed_imgs, PROCESSED_IMG_TOPICS)
]

img = np.vstack(
[
Expand Down Expand Up @@ -235,6 +256,48 @@ def generate_composite(self):

return img

@staticmethod
def overlay_topic_text(
    img,
    topic,
    box_color=(0, 0, 0),
    text_color=(0, 0, 0),
    font_size=1.3,
    thickness=2,
):
    """Prepend a white label strip above *img* with the topic name drawn on it.

    Args:
        img: HxW (grayscale) or HxWx3 (color) uint8 image.
        topic: ROS topic string; '/' is stripped and '_' becomes ' ' for display.
        box_color: currently unused; kept for backward compatibility.
            TODO(review): either draw the label background with it or drop it.
        text_color: BGR color of the label text.
        font_size: cv2 font scale passed to putText/getTextSize.
        thickness: text stroke thickness in pixels.

    Returns:
        A new image of shape (H + 50, W[, 3]) — the original image with a
        50-pixel labeled strip stacked on top. The input is not modified.
    """
    label = topic.replace("_", " ").replace("/", "")
    strip_height = 50
    _, og_width = img.shape[:2]

    # Match the image's channel layout so np.vstack succeeds for both
    # grayscale and color inputs.
    if img.ndim == 3:
        strip_shape = (strip_height, og_width, 3)
    else:
        strip_shape = (strip_height, og_width)
    white_strip = np.full(strip_shape, 255, dtype=np.uint8)

    # Grow the canvas by the strip height; the original pixels shift down.
    viz_img = np.vstack((white_strip, img))

    font = cv2.FONT_HERSHEY_SIMPLEX
    (_, text_height), _ = cv2.getTextSize(label, font, font_size, thickness)

    # With margin == strip_height the baseline lands at y == text_height,
    # i.e. the text sits flush with the top edge of the strip.
    margin = 50
    text_x = margin
    text_y = strip_height - margin + text_height
    cv2.putText(
        viz_img,
        label,
        (text_x, text_y),
        font,
        font_size,
        text_color,
        thickness,
        cv2.LINE_AA,
    )

    return viz_img


def bgrify_grayscale_imgs(imgs):
return [
Expand Down
89 changes: 86 additions & 3 deletions spot_rl_experiments/spot_rl/utils/img_publishers.py
Original file line number Diff line number Diff line change
Expand Up @@ -103,12 +103,20 @@ class SpotLocalRawImagesPublisher(SpotImagePublisher):
rt.HAND_DEPTH,
rt.HAND_RGB,
rt.HAND_DEPTH_UNSCALED,
rt.IRS_RGB,
rt.IRS_DEPTH,
rt.GRIPPER_RGB,
rt.GRIPPER_DEPTH,
]
sources = [
Cam.FRONTRIGHT_DEPTH,
Cam.FRONTLEFT_DEPTH,
Cam.HAND_COLOR,
Cam.HAND_DEPTH_IN_HAND_COLOR_FRAME,
Cam.INTEL_REALSENSE_COLOR,
Cam.INTEL_REALSENSE_DEPTH,
Cam.HAND_COLOR,
Cam.HAND_DEPTH_IN_HAND_COLOR_FRAME,
]

def __init__(self, spot):
Expand All @@ -133,24 +141,99 @@ def _publish(self):
hand_depth_unscaled = imgs[Cam.HAND_DEPTH_IN_HAND_COLOR_FRAME]
hand_rgb = imgs[Cam.HAND_COLOR]

msgs = self.imgs_to_msgs(head_depth, hand_depth, hand_rgb, hand_depth_unscaled)
intel_img_src = [Cam.INTEL_REALSENSE_COLOR]
intel_realsense_color = spot.get_image_responses(
intel_img_src, quality=100, await_the_resp=False
)
intel_response = intel_realsense_color.result()[0]
intel_image: np.ndarray = image_response_to_cv2(intel_response)

intel_depth_src = [Cam.INTEL_REALSENSE_DEPTH]
intel_realsense_depth = spot.get_image_responses(
intel_depth_src, quality=100, await_the_resp=False
)
intel_response_depth = intel_realsense_depth.result()[0]
intel_image_depth: np.ndarray = image_response_to_cv2(intel_response_depth)

gripper_img_src = [Cam.HAND_COLOR]
gripper_color = spot.get_image_responses(
gripper_img_src, quality=100, await_the_resp=False
)
gripper_response = gripper_color.result()[0]
gripper_image: np.ndarray = image_response_to_cv2(gripper_response)

gripper_depth_src = [Cam.HAND_DEPTH_IN_HAND_COLOR_FRAME]
gripper_depth = spot.get_image_responses(
gripper_depth_src, quality=100, await_the_resp=False
)
gripper_response_depth = gripper_depth.result()[0]
gripper_image_depth: np.ndarray = image_response_to_cv2(gripper_response_depth)

msgs = self.imgs_to_msgs(
head_depth,
hand_depth,
hand_rgb,
hand_depth_unscaled,
intel_image,
intel_image_depth,
gripper_image,
gripper_image_depth,
)

for topic, msg in zip(self.pubs.keys(), msgs):
self.pubs[topic].publish(msg)

def imgs_to_msgs(self, head_depth, hand_depth, hand_rgb, hand_depth_unscaled):
def imgs_to_msgs(
    self,
    head_depth,
    hand_depth,
    hand_rgb,
    hand_depth_unscaled,
    intel_image,
    intel_image_depth,
    gripper_image,
    gripper_image_depth,
):
    """Convert raw cv2 images into stamped ROS Image messages.

    All messages share a single ``rospy.Time.now()`` stamp so downstream
    consumers can treat the eight images as one synchronized frame.

    Args:
        head_depth, hand_depth: uint8 depth visualizations ("mono8").
        hand_rgb: BGR color image ("bgr8").
        hand_depth_unscaled: raw 16-bit depth ("mono16").
        intel_image: Intel RealSense color image ("bgr8").
        intel_image_depth: Intel RealSense 16-bit depth ("mono16").
        gripper_image: gripper color image ("bgr8").
        gripper_image_depth: gripper 16-bit depth ("mono16").

    Returns:
        Tuple of sensor_msgs/Image messages, in the same order as the
        arguments (which matches the class-level publisher topic order).
    """
    # (image, cv2 encoding) pairs, ordered to match the publisher topics.
    # NOTE: the previous version's trailing comments were copy-paste
    # errors (depth messages labeled "color"); fixed here.
    image_encoding_pairs = [
        (head_depth, "mono8"),
        (hand_depth, "mono8"),
        (hand_rgb, "bgr8"),
        (hand_depth_unscaled, "mono16"),
        (intel_image, "bgr8"),  # Intel RealSense color
        (intel_image_depth, "mono16"),  # Intel RealSense depth
        (gripper_image, "bgr8"),  # gripper color
        (gripper_image_depth, "mono16"),  # gripper depth
    ]

    # One shared timestamp for the whole batch.
    timestamp = rospy.Time.now()
    msgs = []
    for img, encoding in image_encoding_pairs:
        msg = self.cv2_to_msg(img, encoding)
        msg.header = Header(stamp=timestamp)
        msgs.append(msg)

    return tuple(msgs)

@staticmethod
def _scale_depth(img, head_depth=False):
Expand Down
4 changes: 4 additions & 0 deletions spot_rl_experiments/spot_rl/utils/robot_subscriber.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,10 @@
rt.HAND_RGB,
rt.FILTERED_HEAD_DEPTH,
rt.FILTERED_HAND_DEPTH,
rt.IRS_RGB,
rt.IRS_DEPTH,
rt.GRIPPER_RGB,
rt.GRIPPER_DEPTH,
]
NO_RAW_IMG_TOPICS = [
rt.MASK_RCNN_VIZ_TOPIC,
Expand Down