Tighter UI for Pinhole and when hovering images #3579

Merged (6 commits) on Oct 2, 2023
Changes from all commits
4 changes: 2 additions & 2 deletions crates/re_data_ui/src/image.rs
@@ -107,11 +107,11 @@ fn tensor_ui(

match verbosity {
UiVerbosity::Small => {
- ui.horizontal_centered(|ui| {
+ ui.horizontal(|ui| {
if let Some(texture) = &texture_result {
// We want all preview images to take up the same amount of space,
// no matter what the actual aspect ratio of the images are.
- let preview_size = Vec2::splat(24.0);
+ let preview_size = Vec2::splat(ui.available_height());
ui.allocate_ui_with_layout(
preview_size,
egui::Layout::centered_and_justified(egui::Direction::TopDown),
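For readability, the Small-verbosity branch of tensor_ui() reads as follows after this change. This is a sketch assembled from the hunk above; the closure body that actually draws the texture is elided, since the diff does not show it.

    UiVerbosity::Small => {
        ui.horizontal(|ui| {
            if let Some(texture) = &texture_result {
                // We want all preview images to take up the same amount of space,
                // no matter what the actual aspect ratio of the images are.
                // The square preview now fills the row height egui makes available,
                // instead of a hard-coded 24 px, which keeps list rows tight.
                let preview_size = Vec2::splat(ui.available_height());
                ui.allocate_ui_with_layout(
                    preview_size,
                    egui::Layout::centered_and_justified(egui::Direction::TopDown),
                    |ui| {
                        // ...texture is shown here (not part of this hunk).
                    },
                );
            }
        });
    }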
17 changes: 17 additions & 0 deletions crates/re_data_ui/src/pinhole.rs
@@ -11,6 +11,23 @@ impl DataUi for PinholeProjection {
verbosity: UiVerbosity,
query: &re_arrow_store::LatestAtQuery,
) {
if verbosity == UiVerbosity::Small {
// See if this is a trivial pinhole, and can be displayed as such:
let fl = self.focal_length_in_pixels();
let pp = self.principal_point();
if *self == Self::from_focal_length_and_principal_point(fl, pp) {
let fl = if fl.x() == fl.y() {
fl.x().to_string()
} else {
fl.to_string()
};

ui.label(format!("Focal length: {fl}\nPrincipal point: {pp}"))
.on_hover_ui(|ui| self.data_ui(ctx, ui, UiVerbosity::Reduced, query));
Comment on lines +25 to +26

Member: Focal length is most of the time the same on X and Y. I think the UI should tighten this further in that case.

Member (author): Good point!

[screenshot attached]

return;
}
}

self.0.data_ui(ctx, ui, verbosity, query);
}
}
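For reference, the round-trip check above works because from_focal_length_and_principal_point (added further down, in crates/re_types/src/components/pinhole_projection_ext.rs) builds the standard skew-free intrinsic matrix, with the focal length on the diagonal and the principal point in the last column (assuming the column-major layout of the underlying Mat3x3):

    K = \begin{pmatrix} f_x & 0 & c_x \\ 0 & f_y & c_y \\ 0 & 0 & 1 \end{pmatrix}

So *self == Self::from_focal_length_and_principal_point(fl, pp) only holds for matrices of exactly this form; anything else (skew, a non-standard bottom row) falls through to the generic self.0.data_ui(...) fallback shown above.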
145 changes: 90 additions & 55 deletions crates/re_space_view_spatial/src/ui.rs
@@ -598,10 +598,8 @@ pub fn picking(
// thing as an up-front archetype query somewhere.
if meaning == TensorDataMeaning::Depth {
if let Some(meter) = meter {
- if let Some(raw_value) = tensor.get(&[
-     picking_context.pointer_in_space2d.y.round() as _,
-     picking_context.pointer_in_space2d.x.round() as _,
- ]) {
+ let [x, y] = coords;
+ if let Some(raw_value) = tensor.get(&[y as _, x as _]) {
let raw_value = raw_value.as_f64();
let depth_in_meters = raw_value / meter as f64;
depth_at_pointer = Some(depth_in_meters as f32);
@@ -613,60 +611,21 @@
.on_hover_cursor(egui::CursorIcon::Crosshair)
.on_hover_ui_at_pointer(|ui| {
ui.set_max_width(320.0);

ui.vertical(|ui| {
ui.label(instance_path.to_string());
instance_path.data_ui(
ctx,
image_hover_ui(
ui,
UiVerbosity::Small,
&ctx.current_query(),
&instance_path,
ctx,
tensor.value,
spatial_kind,
ui_clip_rect,
coords,
space_from_ui,
tensor_path_hash,
annotations,
meaning,
meter,
);

if let Some([h, w, ..]) = tensor.image_height_width_channels() {
ui.separator();
ui.horizontal(|ui| {
let (w, h) = (w as f32, h as f32);
if spatial_kind == SpatialSpaceViewKind::TwoD {
let rect = egui::Rect::from_min_size(
egui::Pos2::ZERO,
egui::vec2(w, h),
);
show_zoomed_image_region_area_outline(
ui.ctx(),
ui_clip_rect,
&tensor.0,
[coords[0] as _, coords[1] as _],
space_from_ui.inverse().transform_rect(rect),
);
}

let tensor_name = instance_path.to_string();

let decoded_tensor = ctx.cache.entry(|c: &mut TensorDecodeCache| c.entry(tensor_path_hash, tensor.value.0));
match decoded_tensor {
Ok(decoded_tensor) => {
let annotations = annotations.0.find(&instance_path.entity_path);
let tensor_stats = ctx.cache.entry(|c: &mut TensorStatsCache| c.entry(tensor_path_hash, &decoded_tensor));
show_zoomed_image_region(
ctx.render_ctx,
ui,
tensor_path_hash,
&decoded_tensor,
&tensor_stats,
&annotations,
meaning,
meter,
&tensor_name,
[coords[0] as _, coords[1] as _],
);
}
Err(err) => re_log::warn_once!(
"Encountered problem decoding tensor at path {tensor_name}: {err}"
),
}
});
}
});
})
} else {
@@ -713,6 +672,82 @@ pub fn picking(
Ok(response)
}

#[allow(clippy::too_many_arguments)]
fn image_hover_ui(
ui: &mut egui::Ui,
instance_path: &re_data_store::InstancePath,
ctx: &mut ViewerContext<'_>,
tensor: TensorData,
spatial_kind: SpatialSpaceViewKind,
ui_clip_rect: egui::Rect,
coords: [u32; 2],
space_from_ui: egui::emath::RectTransform,
tensor_path_hash: re_data_store::VersionedInstancePathHash,
annotations: &AnnotationSceneContext,
meaning: TensorDataMeaning,
meter: Option<f32>,
) {
ui.label(instance_path.to_string());
if true {
// Only show the `TensorData` component, to keep the hover UI small; see https://github.com/rerun-io/rerun/issues/3573
use re_types::Loggable as _;
let component_path = re_log_types::ComponentPath::new(
instance_path.entity_path.clone(),
re_types::components::TensorData::name(),
);
component_path.data_ui(ctx, ui, UiVerbosity::Small, &ctx.current_query());
} else {
// Show it all, like we do for any other thing we hover
instance_path.data_ui(ctx, ui, UiVerbosity::Small, &ctx.current_query());
}

if let Some([h, w, ..]) = tensor.image_height_width_channels() {
ui.separator();
ui.horizontal(|ui| {
let (w, h) = (w as f32, h as f32);
if spatial_kind == SpatialSpaceViewKind::TwoD {
let rect = egui::Rect::from_min_size(egui::Pos2::ZERO, egui::vec2(w, h));
show_zoomed_image_region_area_outline(
ui.ctx(),
ui_clip_rect,
&tensor.0,
[coords[0] as _, coords[1] as _],
space_from_ui.inverse().transform_rect(rect),
);
}

let tensor_name = instance_path.to_string();

let decoded_tensor = ctx
.cache
.entry(|c: &mut TensorDecodeCache| c.entry(tensor_path_hash, tensor.0));
match decoded_tensor {
Ok(decoded_tensor) => {
let annotations = annotations.0.find(&instance_path.entity_path);
let tensor_stats = ctx.cache.entry(|c: &mut TensorStatsCache| {
c.entry(tensor_path_hash, &decoded_tensor)
});
show_zoomed_image_region(
ctx.render_ctx,
ui,
tensor_path_hash,
&decoded_tensor,
&tensor_stats,
&annotations,
meaning,
meter,
&tensor_name,
[coords[0] as _, coords[1] as _],
);
}
Err(err) => re_log::warn_once!(
"Encountered problem decoding tensor at path {tensor_name}: {err}"
),
}
});
}
}

fn hit_ui(ui: &mut egui::Ui, hit: &crate::picking::PickingRayHit) {
if hit.hit_type == PickingHitType::GpuPickingResult {
let glam::Vec3 { x, y, z } = hit.space_position;
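Because the removed inline code and the new call are interleaved in the hunk above, here is how the hover handler in picking() reads after this change. This is a sketch assembled from the added lines of that hunk; the surrounding response chain is abbreviated.

        .on_hover_cursor(egui::CursorIcon::Crosshair)
        .on_hover_ui_at_pointer(|ui| {
            ui.set_max_width(320.0);

            // The old inline ui.vertical(...) block is replaced by a single
            // call to the image_hover_ui() helper added in this file.
            image_hover_ui(
                ui,
                &instance_path,
                ctx,
                tensor.value,
                spatial_kind,
                ui_clip_rect,
                coords,
                space_from_ui,
                tensor_path_hash,
                annotations,
                meaning,
                meter,
            );
        })

The arguments are the same locals the removed inline code already used; only the UI code moved into the helper.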
23 changes: 23 additions & 0 deletions crates/re_types/src/components/pinhole_projection_ext.rs
@@ -3,6 +3,20 @@ use crate::datatypes::Vec2D;
use super::PinholeProjection;

impl PinholeProjection {
#[inline]
pub fn from_focal_length_and_principal_point(
focal_length: impl Into<Vec2D>,
principal_point: impl Into<Vec2D>,
) -> Self {
let fl = focal_length.into();
let pp = principal_point.into();
Self::from([
[fl.x(), 0.0, 0.0],
[0.0, fl.y(), 0.0],
[pp.x(), pp.y(), 1.0],
])
}

/// X & Y focal length in pixels.
///
/// [see definition of intrinsic matrix](https://en.wikipedia.org/wiki/Camera_resectioning#Intrinsic_parameters)
@@ -43,3 +57,12 @@ impl PinholeProjection {
.extend(pixel.z)
}
}

#[test]
fn test_pinhole() {
let fl = Vec2D::from([600.0, 600.0]);
let pp = glam::Vec2::from([300.0, 240.0]);
let pinhole = PinholeProjection::from_focal_length_and_principal_point(fl, pp);
assert_eq!(pinhole.focal_length_in_pixels(), fl);
assert_eq!(pinhole.principal_point(), pp);
}