Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix pinhole visualization not working with camera extrinsics & intrinsics on the same path #2568

Merged
merged 3 commits into from
Jun 29, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
24 changes: 16 additions & 8 deletions crates/re_space_view_spatial/src/scene/parts/cameras.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
use re_components::{
coordinates::{Handedness, SignedAxis3},
Component, InstanceKey, Pinhole, ViewCoordinates,
Component, InstanceKey, Pinhole, Transform3D, ViewCoordinates,
};
use re_data_store::{EntityPath, EntityProperties};
use re_renderer::renderer::LineStripFlags;
Expand Down Expand Up @@ -56,12 +56,14 @@ pub struct CamerasPart {
}

impl CamerasPart {
#[allow(clippy::too_many_arguments)]
fn visit_instance(
&mut self,
scene_context: &SpatialSceneContext,
ent_path: &EntityPath,
props: &EntityProperties,
pinhole: Pinhole,
transform_at_entity: Option<Transform3D>,
view_coordinates: ViewCoordinates,
entity_highlight: &SpaceViewOutlineMasks,
) {
Expand All @@ -70,13 +72,10 @@ impl CamerasPart {
// The transform *at* this entity path already has the pinhole transformation we got passed in!
// This makes sense, since if there's an image logged here one would expect that the transform applies.
// We're however first interested in the rigid transform that led here, so query the parent transform.
//
// Note that currently a transform on an object can't have both a pinhole AND a rigid transform,
// which makes this rather well defined here.
let parent_path = ent_path
.parent()
.expect("root path can't be part of scene query");
let Some(world_from_parent) = scene_context.transforms.reference_from_entity(&parent_path) else {
let Some(mut world_from_camera) = scene_context.transforms.reference_from_entity(&parent_path) else {
return;
};

Expand All @@ -94,16 +93,24 @@ impl CamerasPart {
return;
}

// There's one wrinkle with using the parent transform though:
// The entity itself may have a 3D transform which (by convention!) we apply *before* the pinhole camera.
// Let's add that if it exists.
if let Some(transform_at_entity) = transform_at_entity {
world_from_camera =
world_from_camera * transform_at_entity.to_parent_from_child_transform();
}

// If this transform is not representable as an iso transform we can't display it yet.
// This would happen if the camera is under another camera or under a transform with non-uniform scale.
let Some(world_from_camera) = macaw::IsoTransform::from_mat4(&world_from_parent.into()) else {
let Some(world_from_camera_iso) = macaw::IsoTransform::from_mat4(&world_from_camera.into()) else {
return;
};

self.space_cameras.push(SpaceCamera3D {
ent_path: ent_path.clone(),
view_coordinates,
world_from_camera,
world_from_camera: world_from_camera_iso,
pinhole: Some(pinhole),
picture_plane_distance: frustum_length,
});
Expand Down Expand Up @@ -160,7 +167,7 @@ impl CamerasPart {
let mut line_builder = scene_context.shared_render_builders.lines();
let mut batch = line_builder
.batch("camera frustum")
.world_from_obj(world_from_parent)
.world_from_obj(world_from_camera)
.outline_mask_ids(entity_highlight.overall)
.picking_object_id(instance_layer_id.object);
let lines = batch
Expand Down Expand Up @@ -212,6 +219,7 @@ impl ScenePart<SpatialSpaceView> for CamerasPart {
ent_path,
&props,
pinhole,
store.query_latest_component::<Transform3D>(ent_path, &query),
view_coordinates,
entity_highlight,
);
Expand Down
8 changes: 4 additions & 4 deletions examples/python/objectron/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -116,7 +116,7 @@ def log_ar_frames(samples: Iterable[SampleARFrame], seq: Sequence) -> None:
rr.set_time_seconds("time", sample.timestamp)
frame_times.append(sample.timestamp)

rr.log_image_file("world/camera/video", img_path=sample.image_path, img_format=rr.ImageFormat.JPEG)
rr.log_image_file("world/camera", img_path=sample.image_path, img_format=rr.ImageFormat.JPEG)
log_camera(sample.frame.camera)
log_point_cloud(sample.frame.raw_feature_points)

Expand Down Expand Up @@ -149,7 +149,7 @@ def log_camera(cam: ARCamera) -> None:
)
rr.log_view_coordinates("world/camera", xyz="RDF") # X=Right, Y=Down, Z=Forward
rr.log_pinhole(
"world/camera/video",
"world/camera",
child_from_parent=intrinsics,
width=w,
height=h,
Expand Down Expand Up @@ -203,11 +203,11 @@ def log_frame_annotations(frame_times: list[float], frame_annotations: list[Fram
keypoint_pos2s *= IMAGE_RESOLUTION

if len(keypoint_pos2s) == 9:
log_projected_bbox(f"world/camera/video/estimates/box-{obj_ann.object_id}", keypoint_pos2s)
log_projected_bbox(f"world/camera/estimates/box-{obj_ann.object_id}", keypoint_pos2s)
else:
for id, pos2 in zip(keypoint_ids, keypoint_pos2s):
rr.log_point(
f"world/camera/video/estimates/box-{obj_ann.object_id}/{id}",
f"world/camera/estimates/box-{obj_ann.object_id}/{id}",
pos2,
color=[130, 160, 250, 255],
)
Expand Down
15 changes: 6 additions & 9 deletions examples/rust/objectron/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -123,7 +123,7 @@ fn log_baseline_objects(
return None;
}

let box3: Box3D = glam::Vec3::from_slice(&object.scale).into();
let box3: Box3D = (glam::Vec3::from_slice(&object.scale) * 0.5).into();
let transform = {
let translation = glam::Vec3::from_slice(&object.translation);
// NOTE: the dataset is all row-major, transpose those matrices!
Expand Down Expand Up @@ -153,7 +153,7 @@ fn log_video_frame(rec_stream: &RecordingStream, ar_frame: &ArFrame) -> anyhow::
let image_path = ar_frame.dir.join(format!("video/{}.jpg", ar_frame.index));
let tensor = rerun::components::Tensor::from_jpeg_file(&image_path)?;

MsgSender::new("world/camera/video")
MsgSender::new("world/camera")
.with_timepoint(ar_frame.timepoint.clone())
.with_component(&[tensor])?
.send(rec_stream)?;
Expand Down Expand Up @@ -198,7 +198,7 @@ fn log_ar_camera(
rot,
))])?
.send(rec_stream)?;
MsgSender::new("world/camera/video")
MsgSender::new("world/camera")
.with_timepoint(timepoint)
.with_component(&[Pinhole {
image_from_cam: intrinsics.into(),
Expand Down Expand Up @@ -262,12 +262,9 @@ fn log_frame_annotations(
})
.unzip();

let mut msg = MsgSender::new(format!(
"world/camera/video/estimates/box-{}",
ann.object_id
))
.with_timepoint(timepoint.clone())
.with_splat(ColorRGBA::from_rgb(130, 160, 250))?;
let mut msg = MsgSender::new(format!("world/camera/estimates/box-{}", ann.object_id))
.with_timepoint(timepoint.clone())
.with_splat(ColorRGBA::from_rgb(130, 160, 250))?;

if points.len() == 9 {
// Build the preprojected bounding box out of 2D line segments.
Expand Down