Skip to content

Commit

Permalink
Fix crash on reaching line number limit (#3093)
Browse files Browse the repository at this point in the history
* Fixes: #3075
* Related to: #3076

The second commit is the actual fix. But to avoid this whole family of
crashes, the cpu-write-gpu-read belt now always performs error checks and
returns errors instead of panicking.

* [x] I have read and agree to [Contributor
Guide](https://github.com/rerun-io/rerun/blob/main/CONTRIBUTING.md) and
the [Code of
Conduct](https://github.com/rerun-io/rerun/blob/main/CODE_OF_CONDUCT.md)
* [x] I've included a screenshot or gif (if applicable)
* [x] I have tested [demo.rerun.io](https://demo.rerun.io/pr/3093) (if
applicable)

- [PR Build Summary](https://build.rerun.io/pr/3093)
- [Docs
preview](https://rerun.io/preview/b6b737939d3219b0ca32e763d7276b5ca0d504ee/docs)
<!--DOCS-PREVIEW-->
- [Examples
preview](https://rerun.io/preview/b6b737939d3219b0ca32e763d7276b5ca0d504ee/examples)
<!--EXAMPLES-PREVIEW--><!--EXAMPLES-PREVIEW--><!--EXAMPLES-PREVIEW--><!--EXAMPLES-PREVIEW-->
- [Recent benchmark results](https://ref.rerun.io/dev/bench/)
- [Wasm size tracking](https://ref.rerun.io/dev/sizes/)
  • Loading branch information
Wumpf authored and jleibs committed Aug 31, 2023
1 parent bc48e14 commit c11fbf1
Show file tree
Hide file tree
Showing 18 changed files with 294 additions and 85 deletions.
3 changes: 3 additions & 0 deletions crates/re_log/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -14,13 +14,16 @@
mod channel_logger;
mod multi_logger;
mod result_extensions;
mod setup;

#[cfg(target_arch = "wasm32")]
mod web_logger;

pub use log::{Level, LevelFilter};

pub use result_extensions::ResultExt;

// The tracing macros support more syntax features than those of the `log` crate, which is why we use them:
pub use tracing::{debug, error, info, trace, warn};

Expand Down
40 changes: 40 additions & 0 deletions crates/re_log/src/result_extensions.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
/// Extension methods on [`Result`] for reporting errors through the logger
/// instead of propagating or panicking on them.
pub trait ResultExt<T, E> {
    /// Returns `Some(value)` on success; on error, logs the error (with the
    /// caller's source location) and returns `None`.
    fn ok_or_log_error(self) -> Option<T>
    where
        E: std::fmt::Display;

    /// Like [`Self::ok_or_log_error`], but panics on error in debug builds
    /// so that bugs fail fast during development.
    fn unwrap_debug_or_log_error(self) -> Option<T>
    where
        E: std::fmt::Display + std::fmt::Debug;
}

impl<T, E> ResultExt<T, E> for Result<T, E> {
    #[track_caller]
    fn ok_or_log_error(self) -> Option<T>
    where
        E: std::fmt::Display,
    {
        if let Err(err) = &self {
            // Thanks to `#[track_caller]` this reports the caller's location,
            // not this utility function's.
            let loc = std::panic::Location::caller();
            log::error!("{}:{} {err}", loc.file(), loc.line());
        }
        self.ok()
    }

    #[track_caller]
    fn unwrap_debug_or_log_error(self) -> Option<T>
    where
        E: std::fmt::Display + std::fmt::Debug,
    {
        if cfg!(debug_assertions) {
            // Fail loudly during development.
            return Some(self.unwrap());
        }
        // In release builds, degrade to a logged error.
        self.ok_or_log_error()
    }
}
2 changes: 1 addition & 1 deletion crates/re_renderer/examples/2d.rs
Original file line number Diff line number Diff line change
Expand Up @@ -233,7 +233,7 @@ impl framework::Example for Render2D {
}

let line_strip_draw_data = line_strip_builder.into_draw_data(re_ctx).unwrap();
let point_draw_data = point_cloud_builder.into_draw_data(re_ctx);
let point_draw_data = point_cloud_builder.into_draw_data(re_ctx).unwrap();

let image_scale = 4.0;
let rectangle_draw_data = RectangleDrawData::new(
Expand Down
2 changes: 1 addition & 1 deletion crates/re_renderer/examples/depth_cloud.rs
Original file line number Diff line number Diff line change
Expand Up @@ -106,7 +106,7 @@ impl RenderDepthClouds {
std::iter::empty::<re_renderer::PickingLayerInstanceId>(),
);

builder.into_draw_data(re_ctx)
builder.into_draw_data(re_ctx).unwrap()
};

let mut view_builder = ViewBuilder::new(
Expand Down
2 changes: 1 addition & 1 deletion crates/re_renderer/examples/multiview.rs
Original file line number Diff line number Diff line change
Expand Up @@ -334,7 +334,7 @@ impl Example for Multiview {
std::iter::empty::<re_renderer::PickingLayerInstanceId>(),
);

let point_cloud = builder.into_draw_data(re_ctx);
let point_cloud = builder.into_draw_data(re_ctx).unwrap();
let meshes = build_mesh_instances(
re_ctx,
&self.model_mesh_instances,
Expand Down
2 changes: 1 addition & 1 deletion crates/re_renderer/examples/picking.rs
Original file line number Diff line number Diff line change
Expand Up @@ -167,7 +167,7 @@ impl framework::Example for Picking {
point_set.picking_ids.iter().cloned(),
);
}
view_builder.queue_draw(point_builder.into_draw_data(re_ctx));
view_builder.queue_draw(point_builder.into_draw_data(re_ctx).unwrap());

let instances = self
.model_mesh_instances
Expand Down
146 changes: 121 additions & 25 deletions crates/re_renderer/src/allocator/cpu_write_gpu_read_belt.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,25 @@ use crate::{
wgpu_resources::{BufferDesc, GpuBuffer, GpuBufferPool},
};

/// Errors that can occur while writing to a cpu-write-gpu-read staging buffer
/// or copying its contents to a GPU resource.
#[derive(thiserror::Error, Debug, PartialEq, Eq)]
pub enum CpuWriteGpuReadError {
    /// A write was attempted when the buffer had no (or not enough) remaining capacity.
    #[error("Buffer is full, can't append more data! Buffer has capacity for {buffer_element_capacity} elements.")]
    BufferFull { buffer_element_capacity: usize },

    /// A buffer-to-buffer copy would write past the end of the destination buffer.
    #[error("Target buffer has a size of {target_buffer_size}, can't write {copy_size} bytes with an offset of {destination_offset}!")]
    TargetBufferTooSmall {
        target_buffer_size: u64,
        copy_size: u64,
        destination_offset: u64,
    },

    /// A buffer-to-texture copy where the number of written bytes doesn't exactly
    /// match the (padded) byte size required by the target texture rectangle.
    #[error("Target texture doesn't fit the size of the written data to this buffer! Texture size: {target_texture_size} bytes, written data size: {written_data_size} bytes")]
    TargetTextureBufferSizeMismatch {
        target_texture_size: u64,
        // NOTE(review): `usize` here vs. `u64` in the other variants — consider unifying.
        written_data_size: usize,
    },
}

/// A sub-allocated staging buffer that can be written to.
///
/// Behaves a bit like a fixed sized `Vec` in that far it keeps track of how many elements were written to it.
Expand Down Expand Up @@ -57,43 +76,98 @@ where

/// Pushes a slice of elements into the buffer.
///
/// Panics if the data no longer fits into the buffer.
/// If the buffer is not big enough, only the first `self.remaining_capacity()` elements are pushed before returning an error.
#[inline]
pub fn extend_from_slice(&mut self, elements: &[T]) {
pub fn extend_from_slice(&mut self, elements: &[T]) -> Result<(), CpuWriteGpuReadError> {
let (result, elements) = if elements.len() > self.remaining_capacity() {
(
Err(CpuWriteGpuReadError::BufferFull {
buffer_element_capacity: self.capacity(),
}),
&elements[..self.remaining_capacity()],
)
} else {
(Ok(()), elements)
};

let bytes = bytemuck::cast_slice(elements);
self.as_mut_byte_slice()[..bytes.len()].copy_from_slice(bytes);
self.unwritten_element_range.start += elements.len();

result
}

/// Pushes several elements into the buffer.
///
/// Panics if there are more elements than there is space in the buffer.
///
/// Returns number of elements pushed.
/// If the buffer is not big enough, only the first `self.remaining_capacity()` elements are pushed before returning an error.
/// Otherwise, returns the number of elements pushed for convenience.
#[inline]
pub fn extend(&mut self, elements: impl Iterator<Item = T>) -> usize {
pub fn extend(
&mut self,
elements: impl Iterator<Item = T>,
) -> Result<usize, CpuWriteGpuReadError> {
let num_written_before = self.num_written();

for element in elements {
self.push(element);
if self.unwritten_element_range.start >= self.unwritten_element_range.end {
return Err(CpuWriteGpuReadError::BufferFull {
buffer_element_capacity: self.capacity(),
});
}

self.as_mut_byte_slice()[..std::mem::size_of::<T>()]
.copy_from_slice(bytemuck::bytes_of(&element));
self.unwritten_element_range.start += 1;
}
self.num_written() - num_written_before

Ok(self.num_written() - num_written_before)
}

/// Fills the buffer with n instances of an element.
///
/// If the buffer is not big enough, only the first `self.remaining_capacity()` elements are pushed before returning an error.
pub fn fill_n(&mut self, element: T, num_elements: usize) -> Result<(), CpuWriteGpuReadError> {
    // Clamp to what actually fits; any overflow is reported after writing.
    let num_writable = usize::min(num_elements, self.remaining_capacity());

    let element_bytes = bytemuck::bytes_of(&element);
    let element_size = element_bytes.len();
    let buffer_bytes = self.as_mut_byte_slice();

    // Stamp the element's byte representation `num_writable` times.
    for i in 0..num_writable {
        let offset = i * element_size;
        buffer_bytes[offset..offset + element_size].copy_from_slice(element_bytes);
    }
    self.unwritten_element_range.start += num_writable;

    if num_writable == num_elements {
        Ok(())
    } else {
        Err(CpuWriteGpuReadError::BufferFull {
            buffer_element_capacity: self.capacity(),
        })
    }
}

/// Pushes a single element into the buffer and advances the write pointer.
///
/// Panics if the data no longer fits into the buffer.
#[inline]
pub fn push(&mut self, element: T) {
assert!(
self.unwritten_element_range.start < self.unwritten_element_range.end,
"CpuWriteGpuReadBuffer<{}> is full ({} elements written)",
std::any::type_name::<T>(),
self.unwritten_element_range.start,
);
pub fn push(&mut self, element: T) -> Result<(), CpuWriteGpuReadError> {
if self.remaining_capacity() == 0 {
return Err(CpuWriteGpuReadError::BufferFull {
buffer_element_capacity: self.capacity(),
});
}

self.as_mut_byte_slice()[..std::mem::size_of::<T>()]
.copy_from_slice(bytemuck::bytes_of(&element));
self.unwritten_element_range.start += 1;

Ok(())
}

/// The number of elements pushed into the buffer so far.
Expand All @@ -102,6 +176,17 @@ where
self.unwritten_element_range.start
}

/// The number of elements that can still be pushed into the buffer.
#[inline]
pub fn remaining_capacity(&self) -> usize {
    // `start` is advanced by every write (see `push`/`extend_from_slice`),
    // while `end` stays fixed at the buffer's total element capacity.
    self.unwritten_element_range.end - self.unwritten_element_range.start
}

/// Total number of elements that the buffer can hold.
#[inline] // Consistency: the sibling trivial accessors (`remaining_capacity`, `num_written`) are `#[inline]`.
pub fn capacity(&self) -> usize {
    // `end` never moves as elements are written, so it equals the total capacity.
    self.unwritten_element_range.end
}

/// Copies all so far written data to a rectangle on a single 2d texture layer.
///
/// Assumes that the buffer consists of as-tightly-packed-as-possible rows of data.
Expand All @@ -114,16 +199,19 @@ where
encoder: &mut wgpu::CommandEncoder,
destination: wgpu::ImageCopyTexture<'_>,
copy_extent: glam::UVec2,
) {
) -> Result<(), CpuWriteGpuReadError> {
let buffer_info = Texture2DBufferInfo::new(destination.texture.format(), copy_extent);

// Validate that we stay within the written part of the slice (wgpu can't fully know our intention here, so we have to check).
// We go one step further and require the size to be exactly equal - it's too unlikely that you wrote more than is needed!
// (and if you did you probably have regrets anyways!)
debug_assert_eq!(
buffer_info.buffer_size_padded as usize,
self.num_written() * std::mem::size_of::<T>()
);
if buffer_info.buffer_size_padded as usize != self.num_written() * std::mem::size_of::<T>()
{
return Err(CpuWriteGpuReadError::TargetTextureBufferSizeMismatch {
target_texture_size: buffer_info.buffer_size_padded,
written_data_size: self.num_written() * std::mem::size_of::<T>(),
});
}

encoder.copy_buffer_to_texture(
wgpu::ImageCopyBuffer {
Expand All @@ -141,6 +229,8 @@ where
depth_or_array_layers: 1,
},
);

Ok(())
}

/// Copies the entire buffer to another buffer and drops it.
Expand All @@ -149,13 +239,17 @@ where
encoder: &mut wgpu::CommandEncoder,
destination: &GpuBuffer,
destination_offset: wgpu::BufferAddress,
) {
) -> Result<(), CpuWriteGpuReadError> {
let copy_size = (std::mem::size_of::<T>() * self.unwritten_element_range.start) as u64;

// Wgpu does validation as well, but in debug mode we want to panic if the buffer doesn't fit.
debug_assert!(copy_size <= destination_offset + destination.size(),
"Target buffer has a size of {}, can't write {copy_size} bytes with an offset of {destination_offset}!",
destination.size());
// Wgpu does validation as well, but we want to be able to track this error right away.
if copy_size > destination_offset + destination.size() {
return Err(CpuWriteGpuReadError::TargetBufferTooSmall {
target_buffer_size: destination.size(),
copy_size,
destination_offset,
});
}

encoder.copy_buffer_to_buffer(
&self.chunk_buffer,
Expand All @@ -164,6 +258,8 @@ where
destination_offset,
copy_size,
);

Ok(())
}
}

Expand Down
4 changes: 3 additions & 1 deletion crates/re_renderer/src/allocator/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,9 @@ mod cpu_write_gpu_read_belt;
mod gpu_readback_belt;
mod uniform_buffer_fill;

pub use cpu_write_gpu_read_belt::{CpuWriteGpuReadBelt, CpuWriteGpuReadBuffer};
pub use cpu_write_gpu_read_belt::{
CpuWriteGpuReadBelt, CpuWriteGpuReadBuffer, CpuWriteGpuReadError,
};
pub use gpu_readback_belt::{
GpuReadbackBelt, GpuReadbackBuffer, GpuReadbackError, GpuReadbackIdentifier,
GpuReadbackUserDataStorage,
Expand Down
16 changes: 10 additions & 6 deletions crates/re_renderer/src/allocator/uniform_buffer_fill.rs
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
use re_log::ResultExt;

pub use super::cpu_write_gpu_read_belt::{CpuWriteGpuReadBelt, CpuWriteGpuReadBuffer};

use crate::{wgpu_resources::BindGroupEntry, DebugLabel, RenderContext};
Expand Down Expand Up @@ -75,12 +77,14 @@ pub fn create_and_fill_uniform_buffer_batch<T: bytemuck::Pod>(
&ctx.gpu_resources.buffers,
num_buffers as _,
);
staging_buffer.extend(content);
staging_buffer.copy_to_buffer(
ctx.active_frame.before_view_builder_encoder.lock().get(),
&buffer,
0,
);
staging_buffer.extend(content).unwrap_debug_or_log_error();
staging_buffer
.copy_to_buffer(
ctx.active_frame.before_view_builder_encoder.lock().get(),
&buffer,
0,
)
.unwrap_debug_or_log_error();

(0..num_buffers)
.map(|i| BindGroupEntry::Buffer {
Expand Down
Loading

0 comments on commit c11fbf1

Please sign in to comment.