Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

re_renderer: implement depth cloud renderer #1415

Merged
merged 21 commits into from
Mar 2, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Binary file added crates/re_renderer/examples/assets/rerun.obj.zip
Binary file not shown.
486 changes: 486 additions & 0 deletions crates/re_renderer/examples/depth_cloud.rs

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion crates/re_renderer/examples/multiview.rs
Original file line number Diff line number Diff line change
Expand Up @@ -207,7 +207,7 @@ impl Example for Multiview {
.collect_vec();

let model_mesh_instances = {
let reader = std::io::Cursor::new(include_bytes!("rerun.obj.zip"));
let reader = std::io::Cursor::new(include_bytes!("assets/rerun.obj.zip"));
let mut zip = zip::ZipArchive::new(reader).unwrap();
let mut zipped_obj = zip.by_name("rerun.obj").unwrap();
let mut obj_data = Vec::new();
Expand Down
2 changes: 1 addition & 1 deletion crates/re_renderer/shader/composite.wgsl
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ fn main(in: VertexOutput) -> @location(0) Vec4 {
// but are about the location of the texel in the target texture.
var input = textureSample(input_texture, nearest_sampler, in.texcoord).rgb;
// TODO(andreas): Do something meaningful with values above 1
input = clamp(input, ZERO, ONE);
input = clamp(input, ZERO.xyz, ONE.xyz);

// Convert to srgb - this is necessary since the final eframe output does *not* have an srgb format.
// Note that the input here is assumed to be linear - if the input texture was an srgb texture it would have been converted on load.
Expand Down
120 changes: 120 additions & 0 deletions crates/re_renderer/shader/depth_cloud.wgsl
Original file line number Diff line number Diff line change
@@ -0,0 +1,120 @@
//! Renders a point cloud from a depth texture and a set of intrinsics.
//!
//! See `src/renderer/depth_cloud.rs` for more documentation.

#import <./global_bindings.wgsl>
#import <./types.wgsl>
#import <./utils/camera.wgsl>
#import <./utils/flags.wgsl>
#import <./utils/size.wgsl>
#import <./utils/sphere_quad.wgsl>
#import <./utils/srgb.wgsl>

// ---

// Per-point data produced by `compute_point_data`, shared by every vertex of
// that point's quad.
struct PointData {
// Position of the backprojected point, in world space.
pos_in_world: Vec3,
// Point radius before resolution to a final world size (see `radius_scale`).
unresolved_radius: f32,
// Point color; built from linear RGB (converted from sRGB) plus alpha.
color: Vec4
}

// Backprojects the depth texture using the intrinsics passed in the uniform buffer.
//
// `quad_idx` identifies the point's quad and maps 1:1 (row-major) to a texel of
// `depth_texture`.
fn compute_point_data(quad_idx: i32) -> PointData {
teh-cmc marked this conversation as resolved.
Show resolved Hide resolved
// NOTE(review): the two lines above are GitHub review-UI residue from the diff
// view — they are not shader code and must not appear in the real source file.
let wh = textureDimensions(depth_texture);
// Linear quad index -> 2D texel coordinates.
let texcoords = IVec2(quad_idx % wh.x, quad_idx / wh.x);

// TODO(cmc): expose knobs to linearize/normalize/flip/cam-to-plane depth.
let norm_linear_depth = textureLoad(depth_texture, texcoords, 0).x;

// TODO(cmc): support color maps & albedo textures
// For now: grayscale from the depth value, converted from sRGB to linear.
let color = Vec4(linear_from_srgb(Vec3(norm_linear_depth)), 1.0);

// TODO(cmc): This assumes a pinhole camera; need to support other kinds at some point.
// The matrix is transposed here before element access — presumably the CPU side
// uploads it in the opposite layout; verify against `src/renderer/depth_cloud.rs`.
let intrinsics = transpose(depth_cloud_info.depth_camera_intrinsics);
let focal_length = Vec2(intrinsics[0][0], intrinsics[1][1]);
let offset = Vec2(intrinsics[2][0], intrinsics[2][1]);

// Classic pinhole backprojection: (texel - principal_point) * depth / focal_length.
let pos_in_obj = Vec3(
(Vec2(texcoords) - offset) * norm_linear_depth / focal_length,
norm_linear_depth,
);

// Object (camera) space -> world space.
let pos_in_world = depth_cloud_info.world_from_obj * Vec4(pos_in_obj, 1.0);

var data: PointData;
data.pos_in_world = pos_in_world.xyz;
data.unresolved_radius = norm_linear_depth * depth_cloud_info.radius_scale;
data.color = color;

return data;
}

// ---

/// Uniform data describing the depth cloud being rendered.
struct DepthCloudInfo {
/// Transform from object (depth-camera) space to world space.
world_from_obj: Mat4,

/// The intrinsics of the camera used for the projection.
///
/// Only supports pinhole cameras at the moment.
depth_camera_intrinsics: Mat3,

/// The scale to apply to the radii of the backprojected points.
radius_scale: f32,
};
// Bind group 1: per-draw resources for the depth cloud renderer.
@group(1) @binding(0)
var<uniform> depth_cloud_info: DepthCloudInfo;

// Depth values to backproject, read via `textureLoad` (no sampler needed).
@group(1) @binding(1)
var depth_texture: texture_2d<f32>;

// Data passed from the vertex stage to the fragment stage.
struct VertexOut {
@builtin(position) pos_in_clip: Vec4,
// World-space position of this quad vertex.
@location(0) pos_in_world: Vec3,
// World-space center of the point (sphere) this quad belongs to.
@location(1) point_pos_in_world: Vec3,
@location(2) point_color: Vec4,
// Resolved world-space radius of the point.
@location(3) point_radius: f32,
};

@vertex
fn vs_main(@builtin(vertex_index) vertex_idx: u32) -> VertexOut {
// Map this vertex to the point/quad it belongs to (see `sphere_quad.wgsl`).
let quad_idx = sphere_quad_index(vertex_idx);

// Compute point data (valid for the entire quad).
let point_data = compute_point_data(quad_idx);

// Span quad
let quad = sphere_quad_span(vertex_idx, point_data.pos_in_world, point_data.unresolved_radius);

var out: VertexOut;
out.pos_in_clip = frame.projection_from_world * Vec4(quad.pos_in_world, 1.0);
out.pos_in_world = quad.pos_in_world;
out.point_pos_in_world = point_data.pos_in_world;
out.point_color = point_data.color;
out.point_radius = quad.point_resolved_radius;

return out;
}

@fragment
fn fs_main(in: VertexOut) -> @location(0) Vec4 {
// There's easier ways to compute anti-aliasing for when we are in ortho mode since it's
// just circles.
// But it's very nice to have mostly the same code path and this gives us the sphere world
// position along the way.
let ray_in_world = camera_ray_to_world_pos(in.pos_in_world);

// Sphere intersection with anti-aliasing as described by Iq here
// https://www.shadertoy.com/view/MsSSWV
// (but rearranged and labeled so it's easier to understand!)
let d = ray_sphere_distance(ray_in_world, in.point_pos_in_world, in.point_radius);
let smallest_distance_to_sphere = d.x;
let closest_ray_dist = d.y;
let pixel_world_size = approx_pixel_world_size_at(closest_ray_dist);
// Farther than one pixel's worth of AA margin from the sphere -> nothing to draw.
if smallest_distance_to_sphere > pixel_world_size {
discard;
}
// Coverage falls off linearly across one pixel of world size at the silhouette.
let coverage = 1.0 - saturate(smallest_distance_to_sphere / pixel_world_size);

return vec4(in.point_color.rgb, coverage);
}
2 changes: 1 addition & 1 deletion crates/re_renderer/shader/global_bindings.wgsl
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
struct FrameUniformBuffer {
view_from_world: mat4x3<f32>,
view_from_world: Mat4x3,
projection_from_view: Mat4,
projection_from_world: Mat4,

Expand Down
91 changes: 8 additions & 83 deletions crates/re_renderer/shader/point_cloud.wgsl
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
#import <./utils/camera.wgsl>
#import <./utils/flags.wgsl>
#import <./utils/size.wgsl>
#import <./utils/sphere_quad.wgsl>

@group(1) @binding(0)
var position_data_texture: texture_2d<f32>;
Expand Down Expand Up @@ -52,103 +53,27 @@ fn read_data(idx: i32) -> PointData {
return data;
}

// Spans one corner of a camera-facing billboard quad for a pretend-sphere under
// a perspective camera.
//
// `top_bottom` / `left_right` are +/-1 corner selectors; `to_camera` is the
// (unnormalized) vector from the point to the camera and `camera_distance` its
// length. Returns the world-space position of the quad vertex.
fn span_quad_perspective(
point_pos: Vec3,
point_radius: f32,
top_bottom: f32,
left_right: f32,
to_camera: Vec3,
camera_distance: f32
) -> Vec3 {
let distance_to_camera_sq = camera_distance * camera_distance; // (passing on micro-optimization here for splitting this out of earlier length calculation)
let distance_to_camera_inv = 1.0 / camera_distance;
let quad_normal = to_camera * distance_to_camera_inv;
let quad_right = normalize(cross(quad_normal, frame.view_from_world[1].xyz)); // It's spheres so any orthogonal vector would do.
let quad_up = cross(quad_right, quad_normal);
let pos_in_quad = top_bottom * quad_up + left_right * quad_right;

// But we want to draw pretend-spheres here!
// If camera gets close to a sphere (or the sphere is large) then outlines of the sphere would not fit on a quad with radius r!
// Enlarging the quad is one solution, but then Z gets tricky (== we need to write correct Z and not quad Z to depth buffer) since we may get
// "unnecessary" overlaps. So instead, we change the size _and_ move the sphere closer (using math!)
let radius_sq = point_radius * point_radius;
let camera_offset = radius_sq * distance_to_camera_inv;
var modified_radius = point_radius * distance_to_camera_inv * sqrt(distance_to_camera_sq - radius_sq);

// We're computing a coverage mask in the fragment shader - make sure the quad doesn't cut off our antialiasing.
// It's fairly subtle but if we don't do this our spheres look slightly squarish
modified_radius += frame.pixel_world_size_from_camera_distance * camera_distance;

return point_pos + pos_in_quad * modified_radius + camera_offset * quad_normal;

// normal billboard (spheres are cut off!):
// pos = point_data.pos + pos_in_quad * point_radius;
// only enlarged billboard (works but requires z care even for non-overlapping spheres):
// modified_radius = length(toCamera) * radius / sqrt(distance_to_camera_sq - radius_sq);
// pos = particleCenter + quadPosition * modified_radius;
}

// Spans one corner of a billboard quad for a pretend-sphere under an
// orthographic camera.
//
// Simpler than the perspective case: the quad faces the camera's forward
// direction and no sphere-outline enlargement / depth offset is needed.
fn span_quad_orthographic(point_pos: Vec3, point_radius: f32, top_bottom: f32, left_right: f32) -> Vec3 {
let quad_normal = frame.camera_forward;
let quad_right = normalize(cross(quad_normal, frame.view_from_world[1].xyz)); // It's spheres so any orthogonal vector would do.
let quad_up = cross(quad_right, quad_normal);
let pos_in_quad = top_bottom * quad_up + left_right * quad_right;

// We're computing a coverage mask in the fragment shader - make sure the quad doesn't cut off our antialiasing.
// It's fairly subtle but if we don't do this our spheres look slightly squarish
let radius = point_radius + frame.pixel_world_size_from_camera_distance;

return point_pos + pos_in_quad * radius;
}

// NOTE(review): this function is rendered here as a unified diff WITHOUT +/-
// markers: it interleaves the removed implementation (manual quad-corner math,
// explicit radius resolution, `span_quad_*` calls, `Vec4(pos, ...)` outputs)
// with the added one (the `sphere_quad_index` / `sphere_quad_span` helpers).
// Duplicated statements below (two `quad_idx` lets, two `out.position` /
// `out.radius` / `out.world_position` assignments) are diff artifacts — the
// real post-merge function keeps only the `sphere_quad_*` path. Verify against
// the merged `point_cloud.wgsl` before treating this text as source.
@vertex
fn vs_main(@builtin(vertex_index) vertex_idx: u32) -> VertexOut {
// Basic properties of the vertex we're at.
let quad_idx = i32(vertex_idx) / 6;
let local_idx = vertex_idx % 6u;
let top_bottom = f32(local_idx <= 1u || local_idx == 5u) * 2.0 - 1.0; // 1 for a top vertex, -1 for a bottom vertex.
let left_right = f32(vertex_idx % 2u) * 2.0 - 1.0; // 1 for a right vertex, -1 for a left vertex.
let quad_idx = sphere_quad_index(vertex_idx);

// Read point data (valid for the entire quad)
let point_data = read_data(quad_idx);
// Resolve radius to a world size. We need the camera distance for this, which is useful later on.
let to_camera = frame.camera_position - point_data.pos;
let camera_distance = length(to_camera);
let radius = unresolved_size_to_world(point_data.unresolved_radius, camera_distance, frame.auto_size_points);

// Span quad
var pos: Vec3;
if is_camera_perspective() {
pos = span_quad_perspective(point_data.pos, radius, top_bottom, left_right, to_camera, camera_distance);
} else {
pos = span_quad_orthographic(point_data.pos, radius, top_bottom, left_right);
}
let quad = sphere_quad_span(vertex_idx, point_data.pos, point_data.unresolved_radius);

// Output, transform to projection space and done.
var out: VertexOut;
out.position = frame.projection_from_world * Vec4(pos, 1.0);
out.position = frame.projection_from_world * Vec4(quad.pos_in_world, 1.0);
out.color = point_data.color;
out.radius = radius;
out.world_position = pos;
out.radius = quad.point_resolved_radius;
out.world_position = quad.pos_in_world;
out.point_center = point_data.pos;

return out;
}


// Returns distance to sphere surface (x) and distance of the closest ray hit (y).
// Via https://iquilezles.org/articles/spherefunctions/ but with more verbose names.
fn sphere_distance(ray: Ray, sphere_origin: Vec3, sphere_radius: f32) -> Vec2 {
let sphere_radius_sq = sphere_radius * sphere_radius;
// Vector from the sphere center to the ray origin.
let sphere_to_origin = ray.origin - sphere_origin;
// Quadratic setup for |origin + t*direction - center|^2 = radius^2.
let b = dot(sphere_to_origin, ray.direction);
let c = dot(sphere_to_origin, sphere_to_origin) - sphere_radius_sq;
// h: discriminant; >= 0 means the ray actually hits the sphere.
let h = b * b - c;
let d = sqrt(max(0.0, sphere_radius_sq - h)) - sphere_radius;
return Vec2(d, -b - sqrt(max(h, 0.0)));
}


@fragment
fn fs_main(in: VertexOut) -> @location(0) Vec4 {
// There's easier ways to compute anti-aliasing for when we are in ortho mode since it's just circles.
Expand All @@ -158,11 +83,11 @@ fn fs_main(in: VertexOut) -> @location(0) Vec4 {
// Sphere intersection with anti-aliasing as described by Iq here
// https://www.shadertoy.com/view/MsSSWV
// (but rearranged and labeled so it's easier to understand!)
let d = sphere_distance(ray, in.point_center, in.radius);
let d = ray_sphere_distance(ray, in.point_center, in.radius);
let smallest_distance_to_sphere = d.x;
let closest_ray_dist = d.y;
let pixel_world_size = approx_pixel_world_size_at(closest_ray_dist);
if smallest_distance_to_sphere > pixel_world_size {
if smallest_distance_to_sphere > pixel_world_size {
discard;
}
let coverage = 1.0 - saturate(smallest_distance_to_sphere / pixel_world_size);
Expand Down
6 changes: 4 additions & 2 deletions crates/re_renderer/shader/types.wgsl
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,8 @@ type UVec4 = vec4<u32>;
type IVec2 = vec2<i32>;
type IVec3 = vec3<i32>;
type IVec4 = vec4<i32>;
type Mat3 = mat3x3<f32>;
type Mat4x3 = mat4x3<f32>;
type Mat4 = mat4x4<f32>;

const f32min = -3.4028235e38;
Expand All @@ -21,5 +23,5 @@ const X = Vec3(1.0, 0.0, 0.0);
const Y = Vec3(0.0, 1.0, 0.0);
const Z = Vec3(0.0, 0.0, 1.0);

const ZERO = Vec3(0.0, 0.0, 0.0);
const ONE = Vec3(1.0, 1.0, 1.0);
const ZERO = Vec4(0.0, 0.0, 0.0, 0.0);
const ONE = Vec4(1.0, 1.0, 1.0, 1.0);
15 changes: 13 additions & 2 deletions crates/re_renderer/shader/utils/camera.wgsl
Original file line number Diff line number Diff line change
Expand Up @@ -57,11 +57,22 @@ fn camera_ray_direction_from_screenuv(texcoord: Vec2) -> Vec3 {
return normalize(world_space_dir);
}

// Returns the distance to the sphere's surface (x) and the distance of the
// closest ray hit (y).
//
// Via https://iquilezles.org/articles/spherefunctions/ but with more verbose names.
fn ray_sphere_distance(ray: Ray, sphere_origin: Vec3, sphere_radius: f32) -> Vec2 {
// Quadratic setup for |ray.origin + t * ray.direction - sphere_origin|^2 = r^2.
let radius_sq = sphere_radius * sphere_radius;
let center_to_origin = ray.origin - sphere_origin;
let proj_on_ray = dot(center_to_origin, ray.direction);
let center_dist_sq_minus_radius_sq = dot(center_to_origin, center_to_origin) - radius_sq;
// Discriminant; non-negative iff the ray actually intersects the sphere.
let discriminant = proj_on_ray * proj_on_ray - center_dist_sq_minus_radius_sq;
let surface_dist = sqrt(max(0.0, radius_sq - discriminant)) - sphere_radius;
return Vec2(surface_dist, -proj_on_ray - sqrt(max(discriminant, 0.0)));
}

// Returns the projected size of a pixel at a given distance from the camera.
//
// This is accurate for objects in the middle of the screen, (depending on the angle) less so at the corners
// since an object parallel to the camera (like a conceptual pixel) has a bigger projected surface at higher angles.
fn approx_pixel_world_size_at(camera_distance: f32) -> f32 {
// NOTE(review): the diff view shows both the old (wrapped over two lines) and
// the new (single-line) form of this return statement; only one exists in the
// actual source. The next two lines are the pre-change version:
return select(frame.pixel_world_size_from_camera_distance,
camera_distance * frame.pixel_world_size_from_camera_distance, is_camera_perspective());
// ...and this is the post-change (current) version:
return select(frame.pixel_world_size_from_camera_distance, camera_distance * frame.pixel_world_size_from_camera_distance, is_camera_perspective());
Wumpf marked this conversation as resolved.
Show resolved Hide resolved
// NOTE(review): the two lines above are GitHub review-UI residue, not shader code.
}
Loading