diff --git a/CHANGELOG.md b/CHANGELOG.md
index b440074bd95..1223ad6c1d6 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -106,7 +106,15 @@ One other breaking change worth noting is that in WGSL `@builtin(view_index)` no
 By @SupaMaggie70Incorporated in [#8206](https://github.com/gfx-rs/wgpu/pull/8206).
 
-#### Error Scopes are now thread-local
+#### Error scopes now use guards and are thread-local
+
+```diff
+- device.push_error_scope(wgpu::ErrorFilter::Validation);
++ let scope = device.push_error_scope(wgpu::ErrorFilter::Validation);
+  // ... perform operations on the device ...
+- let error: Option<wgpu::Error> = device.pop_error_scope().await;
++ let error: Option<wgpu::Error> = scope.pop().await;
+```
 
 Device error scopes now operate on a per-thread basis. This allows them to be used easily within multithreaded contexts, without having the error scope capture errors from other threads.
diff --git a/examples/features/src/ray_cube_compute/mod.rs b/examples/features/src/ray_cube_compute/mod.rs
index 0ba6277577d..9ad04dcfb7b 100644
--- a/examples/features/src/ray_cube_compute/mod.rs
+++ b/examples/features/src/ray_cube_compute/mod.rs
@@ -420,8 +420,6 @@ impl crate::framework::Example for Example {
     }
 
     fn render(&mut self, view: &wgpu::TextureView, device: &wgpu::Device, queue: &wgpu::Queue) {
-        device.push_error_scope(wgpu::ErrorFilter::Validation);
-
         let anim_time = self.animation_timer.time();
 
         self.tlas[0].as_mut().unwrap().transform =
diff --git a/examples/features/src/ray_cube_fragment/mod.rs b/examples/features/src/ray_cube_fragment/mod.rs
index 6cd8fc48839..253397c8dc3 100644
--- a/examples/features/src/ray_cube_fragment/mod.rs
+++ b/examples/features/src/ray_cube_fragment/mod.rs
@@ -294,8 +294,6 @@ impl crate::framework::Example for Example {
     }
 
     fn render(&mut self, view: &wgpu::TextureView, device: &wgpu::Device, queue: &wgpu::Queue) {
-        device.push_error_scope(wgpu::ErrorFilter::Validation);
-
         // scene update
         {
             let dist = 12.0;
diff --git a/examples/features/src/ray_cube_normals/mod.rs b/examples/features/src/ray_cube_normals/mod.rs
index 1c7acf7c377..e47429fc36c 100644
--- a/examples/features/src/ray_cube_normals/mod.rs
+++ b/examples/features/src/ray_cube_normals/mod.rs
@@ -406,8 +406,6 @@ impl crate::framework::Example for Example {
     }
 
     fn render(&mut self, view: &wgpu::TextureView, device: &wgpu::Device, queue: &wgpu::Queue) {
-        device.push_error_scope(wgpu::ErrorFilter::Validation);
-
         let anim_time = self.animation_timer.time();
 
         self.tlas[0].as_mut().unwrap().transform =
diff --git a/examples/features/src/ray_scene/mod.rs b/examples/features/src/ray_scene/mod.rs
index 9d401ec578a..049eb1d7c0e 100644
--- a/examples/features/src/ray_scene/mod.rs
+++ b/examples/features/src/ray_scene/mod.rs
@@ -464,8 +464,6 @@ impl crate::framework::Example for Example {
     }
 
     fn render(&mut self, view: &wgpu::TextureView, device: &wgpu::Device, queue: &wgpu::Queue) {
-        device.push_error_scope(wgpu::ErrorFilter::Validation);
-
         // scene update
         {
             let dist = 3.5;
diff --git a/examples/features/src/srgb_blend/mod.rs b/examples/features/src/srgb_blend/mod.rs
index 6449a12568d..854f0bb26ac 100644
--- a/examples/features/src/srgb_blend/mod.rs
+++ b/examples/features/src/srgb_blend/mod.rs
@@ -173,7 +173,6 @@ impl crate::framework::Example for Example {
     }
 
     fn render(&mut self, view: &wgpu::TextureView, device: &wgpu::Device, queue: &wgpu::Queue) {
-        device.push_error_scope(wgpu::ErrorFilter::Validation);
        let mut encoder =
            device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
        {
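The changelog hunk above is the whole migration in miniature; spelled out as a complete snippet, the guard-based flow looks like the sketch below. This is illustrative only (it assumes a `wgpu::Device` in scope and the `ErrorScopeGuard` API introduced by this diff), not an excerpt from the patch itself.

```rust
// Sketch of the guard-based error scope flow described in the CHANGELOG entry above.
async fn capture_validation_errors(device: &wgpu::Device) -> Option<wgpu::Error> {
    // `push_error_scope` now returns a guard instead of only mutating device state.
    let scope = device.push_error_scope(wgpu::ErrorFilter::Validation);

    // ... perform operations on the device that may raise validation errors ...

    // Popping consumes the guard; awaiting the returned future yields the captured error, if any.
    scope.pop().await
}
```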
diff --git a/examples/standalone/custom_backend/src/custom.rs b/examples/standalone/custom_backend/src/custom.rs
index 7b2403611eb..fe20d95f6ec 100644
--- a/examples/standalone/custom_backend/src/custom.rs
+++ b/examples/standalone/custom_backend/src/custom.rs
@@ -251,11 +251,11 @@ impl DeviceInterface for CustomDevice {
         unimplemented!()
     }
 
-    fn push_error_scope(&self, _filter: wgpu::ErrorFilter) {
+    fn push_error_scope(&self, _filter: wgpu::ErrorFilter) -> u32 {
         unimplemented!()
     }
 
-    fn pop_error_scope(&self) -> Pin> {
+    fn pop_error_scope(&self, _index: u32) -> Pin> {
         unimplemented!()
     }
 
diff --git a/tests/Cargo.toml b/tests/Cargo.toml
index d8100f8b547..0ae9e19b244 100644
--- a/tests/Cargo.toml
+++ b/tests/Cargo.toml
@@ -94,4 +94,5 @@ wasm-bindgen.workspace = true
 web-sys = { workspace = true, features = ["CanvasRenderingContext2d", "Blob"] }
 
 [lints.clippy]
+bool_assert_comparison = "allow"
 disallowed_types = "allow"
diff --git a/tests/src/lib.rs b/tests/src/lib.rs
index 22afd7ecf77..3903927060b 100644
--- a/tests/src/lib.rs
+++ b/tests/src/lib.rs
@@ -39,9 +39,9 @@ pub fn fail(
     callback: impl FnOnce() -> T,
     expected_msg_substring: Option<&str>,
 ) -> T {
-    device.push_error_scope(wgpu::ErrorFilter::Validation);
+    let scope = device.push_error_scope(wgpu::ErrorFilter::Validation);
     let result = callback();
-    let validation_error = pollster::block_on(device.pop_error_scope())
+    let validation_error = pollster::block_on(scope.pop())
         .expect("expected validation error in callback, but no validation error was emitted");
     if let Some(expected_msg_substring) = expected_msg_substring {
         let lowered_expected = expected_msg_substring.to_lowercase();
@@ -63,9 +63,9 @@
 /// Run some code in an error scope and assert that validation succeeds.
 #[track_caller]
 pub fn valid<T>(device: &wgpu::Device, callback: impl FnOnce() -> T) -> T {
-    device.push_error_scope(wgpu::ErrorFilter::Validation);
+    let scope = device.push_error_scope(wgpu::ErrorFilter::Validation);
     let result = callback();
-    if let Some(error) = pollster::block_on(device.pop_error_scope()) {
+    if let Some(error) = pollster::block_on(scope.pop()) {
         panic!(
             "`valid` block at {} encountered wgpu error:\n{error}",
             std::panic::Location::caller()
@@ -95,9 +95,9 @@ fn did_fill_error_scope(
     callback: impl FnOnce() -> T,
     filter: wgpu::ErrorFilter,
 ) -> (bool, T) {
-    device.push_error_scope(filter);
+    let scope = device.push_error_scope(filter);
     let result = callback();
-    let validation_error = pollster::block_on(device.pop_error_scope());
+    let validation_error = pollster::block_on(scope.pop());
     let failed = validation_error.is_some();
 
     (failed, result)
diff --git a/tests/tests/wgpu-gpu/dispatch_workgroups_indirect.rs b/tests/tests/wgpu-gpu/dispatch_workgroups_indirect.rs
index c6c3720749f..900f84b3bb2 100644
--- a/tests/tests/wgpu-gpu/dispatch_workgroups_indirect.rs
+++ b/tests/tests/wgpu-gpu/dispatch_workgroups_indirect.rs
@@ -77,7 +77,7 @@ static RESET_BIND_GROUPS: GpuTestConfiguration = GpuTestConfiguration::new()
         }).enable_noop(),
     )
     .run_async(|ctx| async move {
-        ctx.device.push_error_scope(wgpu::ErrorFilter::Validation);
+        let scope = ctx.device.push_error_scope(wgpu::ErrorFilter::Validation);
 
         let test_resources = TestResources::new(&ctx);
 
@@ -98,7 +98,7 @@ static RESET_BIND_GROUPS: GpuTestConfiguration = GpuTestConfiguration::new()
         }
 
         ctx.queue.submit(Some(encoder.finish()));
-        let error = pollster::block_on(ctx.device.pop_error_scope());
+        let error = pollster::block_on(scope.pop());
         assert!(error.is_some_and(|error| {
format!("{error}").contains("The current set ComputePipeline with '' label expects a BindGroup to be set at index 0") })); @@ -120,7 +120,7 @@ static ZERO_SIZED_BUFFER: GpuTestConfiguration = GpuTestConfiguration::new() .enable_noop(), ) .run_async(|ctx| async move { - ctx.device.push_error_scope(wgpu::ErrorFilter::Validation); + let scope = ctx.device.push_error_scope(wgpu::ErrorFilter::Validation); let test_resources = TestResources::new(&ctx); @@ -141,7 +141,7 @@ static ZERO_SIZED_BUFFER: GpuTestConfiguration = GpuTestConfiguration::new() } ctx.queue.submit(Some(encoder.finish())); - let error = pollster::block_on(ctx.device.pop_error_scope()); + let error = pollster::block_on(scope.pop()); assert!(error.is_some_and(|error| { format!("{error}").contains( "Indirect buffer uses bytes 0..12 which overruns indirect buffer of size 0", diff --git a/tests/tests/wgpu-gpu/encoder.rs b/tests/tests/wgpu-gpu/encoder.rs index ab1c10594ca..46d7f24eef7 100644 --- a/tests/tests/wgpu-gpu/encoder.rs +++ b/tests/tests/wgpu-gpu/encoder.rs @@ -319,7 +319,7 @@ fn encoder_operations_fail_while_pass_alive(ctx: TestingContext) { let pass = create_pass(&mut encoder, pass_type); - ctx.device.push_error_scope(wgpu::ErrorFilter::Validation); + let _scope = ctx.device.push_error_scope(wgpu::ErrorFilter::Validation); log::info!("Testing operation {op_name:?} on a locked command encoder while a {pass_type:?} pass is active"); op(&mut encoder); @@ -341,6 +341,9 @@ fn encoder_operations_fail_while_pass_alive(ctx: TestingContext) { drop(pass); fail(&ctx.device, || encoder.finish(), Some("encoder is locked")); + + // We don't care about any errors that happen outside of a `fail` call. + drop(_scope); } } } diff --git a/tests/tests/wgpu-gpu/oom.rs b/tests/tests/wgpu-gpu/oom.rs index c69915b579c..9970d5c340b 100644 --- a/tests/tests/wgpu-gpu/oom.rs +++ b/tests/tests/wgpu-gpu/oom.rs @@ -46,7 +46,7 @@ static TEXTURE_OOM_TEST: GpuTestConfiguration = GpuTestConfiguration::new() .run_async(|ctx| async move { let mut textures = Vec::new(); for _ in 0..LOOP_BOUND { - ctx.device.push_error_scope(ErrorFilter::OutOfMemory); + let scope = ctx.device.push_error_scope(ErrorFilter::OutOfMemory); let texture = ctx.device.create_texture(&TextureDescriptor { label: None, size: Extent3d { @@ -61,7 +61,7 @@ static TEXTURE_OOM_TEST: GpuTestConfiguration = GpuTestConfiguration::new() usage: TextureUsages::RENDER_ATTACHMENT, view_formats: &[], }); - if let Some(err) = ctx.device.pop_error_scope().await { + if let Some(err) = scope.pop().await { match err { Error::OutOfMemory { .. } => { return; @@ -85,14 +85,14 @@ static BUFFER_OOM_TEST: GpuTestConfiguration = GpuTestConfiguration::new() .run_async(|ctx| async move { let mut buffers = Vec::new(); for _ in 0..LOOP_BOUND { - ctx.device.push_error_scope(ErrorFilter::OutOfMemory); + let scope = ctx.device.push_error_scope(ErrorFilter::OutOfMemory); let buffer = ctx.device.create_buffer(&BufferDescriptor { label: None, size: 256 * 1024 * 1024, usage: BufferUsages::STORAGE, mapped_at_creation: false, }); - if let Some(err) = ctx.device.pop_error_scope().await { + if let Some(err) = scope.pop().await { match err { Error::OutOfMemory { .. 
                         return;
@@ -116,14 +116,14 @@ static MAPPING_BUFFER_OOM_TEST: GpuTestConfiguration = GpuTestConfiguration::new
     .run_async(|ctx| async move {
         let mut buffers = Vec::new();
         for _ in 0..LOOP_BOUND {
-            ctx.device.push_error_scope(ErrorFilter::OutOfMemory);
+            let scope = ctx.device.push_error_scope(ErrorFilter::OutOfMemory);
             let buffer = ctx.device.create_buffer(&BufferDescriptor {
                 label: None,
                 size: 256 * 1024 * 1024,
                 usage: BufferUsages::COPY_SRC | BufferUsages::MAP_WRITE,
                 mapped_at_creation: false,
             });
-            if let Some(err) = ctx.device.pop_error_scope().await {
+            if let Some(err) = scope.pop().await {
                 match err {
                     Error::OutOfMemory { .. } => {
                         return;
@@ -148,13 +148,13 @@ static QUERY_SET_OOM_TEST: GpuTestConfiguration = GpuTestConfiguration::new()
     .run_async(|ctx| async move {
         let mut query_sets = Vec::new();
         for _ in 0..LOOP_BOUND {
-            ctx.device.push_error_scope(ErrorFilter::OutOfMemory);
+            let scope = ctx.device.push_error_scope(ErrorFilter::OutOfMemory);
             let query_set = ctx.device.create_query_set(&QuerySetDescriptor {
                 label: None,
                 ty: QueryType::Occlusion,
                 count: 4096,
             });
-            if let Some(err) = ctx.device.pop_error_scope().await {
+            if let Some(err) = scope.pop().await {
                 match err {
                     Error::OutOfMemory { .. } => {
                         return;
@@ -179,7 +179,7 @@ static BLAS_OOM_TEST: GpuTestConfiguration = GpuTestConfiguration::new()
     .run_async(|ctx| async move {
         let mut blases = Vec::new();
         for _ in 0..LOOP_BOUND {
-            ctx.device.push_error_scope(ErrorFilter::OutOfMemory);
+            let scope = ctx.device.push_error_scope(ErrorFilter::OutOfMemory);
             let blas = ctx.device.create_blas(
                 &CreateBlasDescriptor {
                     label: None,
@@ -196,7 +196,7 @@ static BLAS_OOM_TEST: GpuTestConfiguration = GpuTestConfiguration::new()
                     }],
                 },
             );
-            if let Some(err) = ctx.device.pop_error_scope().await {
+            if let Some(err) = scope.pop().await {
                 match err {
                     Error::OutOfMemory { .. } => {
                         return;
@@ -221,14 +221,14 @@ static TLAS_OOM_TEST: GpuTestConfiguration = GpuTestConfiguration::new()
     .run_async(|ctx| async move {
         let mut tlases = Vec::new();
         for _ in 0..LOOP_BOUND {
-            ctx.device.push_error_scope(ErrorFilter::OutOfMemory);
+            let scope = ctx.device.push_error_scope(ErrorFilter::OutOfMemory);
             let tlas = ctx.device.create_tlas(&CreateTlasDescriptor {
                 label: None,
                 max_instances: 1024 * 1024,
                 flags: AccelerationStructureFlags::PREFER_FAST_TRACE,
                 update_mode: AccelerationStructureUpdateMode::Build,
             });
-            if let Some(err) = ctx.device.pop_error_scope().await {
+            if let Some(err) = scope.pop().await {
                 match err {
                     Error::OutOfMemory { .. } => {
                         return;
diff --git a/tests/tests/wgpu-gpu/pipeline.rs b/tests/tests/wgpu-gpu/pipeline.rs
index 7efaae8dd5b..855d0b4b230 100644
--- a/tests/tests/wgpu-gpu/pipeline.rs
+++ b/tests/tests/wgpu-gpu/pipeline.rs
@@ -44,8 +44,6 @@ static COMPUTE_PIPELINE_DEFAULT_LAYOUT_BAD_MODULE: GpuTestConfiguration =
     GpuTestConfiguration::new()
         .parameters(TestParameters::default().enable_noop())
         .run_sync(|ctx| {
-            ctx.device.push_error_scope(wgpu::ErrorFilter::Validation);
-
             fail(
                 &ctx.device,
                 || {
@@ -78,8 +76,6 @@ static COMPUTE_PIPELINE_DEFAULT_LAYOUT_BAD_BGL_INDEX: GpuTestConfiguration =
                 .enable_noop(),
         )
         .run_sync(|ctx| {
-            ctx.device.push_error_scope(wgpu::ErrorFilter::Validation);
-
             fail(
                 &ctx.device,
                 || {
@@ -107,8 +103,6 @@ static RENDER_PIPELINE_DEFAULT_LAYOUT_BAD_MODULE: GpuTestConfiguration =
     GpuTestConfiguration::new()
         .parameters(TestParameters::default().enable_noop())
         .run_sync(|ctx| {
-            ctx.device.push_error_scope(wgpu::ErrorFilter::Validation);
-
             fail(
                 &ctx.device,
                 || {
@@ -148,8 +142,6 @@ static RENDER_PIPELINE_DEFAULT_LAYOUT_BAD_BGL_INDEX: GpuTestConfiguration =
                 .enable_noop(),
         )
         .run_sync(|ctx| {
-            ctx.device.push_error_scope(wgpu::ErrorFilter::Validation);
-
             fail(
                 &ctx.device,
                 || {
diff --git a/tests/tests/wgpu-gpu/query_set.rs b/tests/tests/wgpu-gpu/query_set.rs
index 4aee905d3fb..a1aed7cbabe 100644
--- a/tests/tests/wgpu-gpu/query_set.rs
+++ b/tests/tests/wgpu-gpu/query_set.rs
@@ -10,7 +10,7 @@ static DROP_FAILED_TIMESTAMP_QUERY_SET: GpuTestConfiguration = GpuTestConfigurat
     .run_sync(|ctx| {
         // Enter an error scope, so the validation catch-all doesn't
         // report the error too early.
-        ctx.device.push_error_scope(wgpu::ErrorFilter::Validation);
+        let scope = ctx.device.push_error_scope(wgpu::ErrorFilter::Validation);
 
         // Creating this query set should fail, since we didn't include
         // TIMESTAMP_QUERY in our required features.
@@ -23,5 +23,5 @@ static DROP_FAILED_TIMESTAMP_QUERY_SET: GpuTestConfiguration = GpuTestConfigurat
         // Dropping this should not panic.
         drop(bad_query_set);
 
-        assert!(pollster::block_on(ctx.device.pop_error_scope()).is_some());
+        assert!(pollster::block_on(scope.pop()).is_some());
     });
diff --git a/tests/tests/wgpu-gpu/shader/compilation_messages/mod.rs b/tests/tests/wgpu-gpu/shader/compilation_messages/mod.rs
index 9304836a15e..bedcdedd9fe 100644
--- a/tests/tests/wgpu-gpu/shader/compilation_messages/mod.rs
+++ b/tests/tests/wgpu-gpu/shader/compilation_messages/mod.rs
@@ -24,11 +24,11 @@ static SHADER_COMPILE_SUCCESS: GpuTestConfiguration = GpuTestConfiguration::new(
 static SHADER_COMPILE_ERROR: GpuTestConfiguration = GpuTestConfiguration::new()
     .parameters(TestParameters::default().enable_noop())
     .run_async(|ctx| async move {
-        ctx.device.push_error_scope(wgpu::ErrorFilter::Validation);
+        let scope = ctx.device.push_error_scope(wgpu::ErrorFilter::Validation);
         let sm = ctx
             .device
             .create_shader_module(include_wgsl!("error_shader.wgsl"));
-        assert!(pollster::block_on(ctx.device.pop_error_scope()).is_some());
+        assert!(pollster::block_on(scope.pop()).is_some());
 
         let compilation_info = sm.get_compilation_info().await;
         let error_message = compilation_info
diff --git a/tests/tests/wgpu-validation/api/error_scopes.rs b/tests/tests/wgpu-validation/api/error_scopes.rs
index 90352f4933f..85b4837f71e 100644
--- a/tests/tests/wgpu-validation/api/error_scopes.rs
+++ b/tests/tests/wgpu-validation/api/error_scopes.rs
@@ -1,42 +1,129 @@
 #![cfg(not(target_arch = "wasm32"))]
 
-use std::sync::{
-    atomic::{AtomicBool, Ordering},
-    Arc,
+use std::{
+    panic::{resume_unwind, AssertUnwindSafe},
+    sync::Arc,
 };
 
+use parking_lot::Mutex;
+
+const ERR: &str = "Buffer size 9223372036854775808 is greater than the maximum buffer size";
+
+fn raise_validation_error(device: &wgpu::Device) {
+    let _buffer = device.create_buffer(&wgpu::BufferDescriptor {
+        label: None,
+        size: 1 << 63, // Too large!
+        usage: wgpu::BufferUsages::COPY_SRC,
+        mapped_at_creation: false,
+    });
+}
+
+fn register_uncaptured_error_handler(device: &wgpu::Device) -> Arc<Mutex<Vec<wgpu::Error>>> {
+    let errors = Arc::new(Mutex::new(Vec::new()));
+    let errors_clone = errors.clone();
+
+    device.on_uncaptured_error(Arc::new(move |error| {
+        errors_clone.lock().push(error);
+    }));
+
+    errors
+}
+
+fn assert_matches_string(error: &wgpu::Error, substr: &str) {
+    let err_str = error.to_string();
+    assert!(
+        err_str.contains(substr),
+        "Error string '{err_str}' does not contain expected substring '{substr}'"
+    );
+}
+
+// Test that error scopes work correctly in the basic case.
+#[test]
+fn basic() {
+    let (device, _queue) = wgpu::Device::noop(&wgpu::DeviceDescriptor::default());
+
+    let scope = device.push_error_scope(wgpu::ErrorFilter::Validation);
+    raise_validation_error(&device);
+    let error = pollster::block_on(scope.pop());
+
+    assert!(error.is_some());
+}
+
 // Test that error scopes are thread-local: an error scope pushed on one thread
 // does not capture errors generated on another thread.
 #[test]
 fn multi_threaded_scopes() {
     let (device, _queue) = wgpu::Device::noop(&wgpu::DeviceDescriptor::default());
 
-    let other_thread_error = Arc::new(AtomicBool::new(false));
-    let other_thread_error_clone = other_thread_error.clone();
-
     // Start an error scope on the main thread.
-    device.push_error_scope(wgpu::ErrorFilter::Validation);
+    let scope = device.push_error_scope(wgpu::ErrorFilter::Validation);
 
     // Register an uncaptured error handler to catch errors from other threads.
-    device.on_uncaptured_error(Arc::new(move |_error| {
-        other_thread_error_clone.store(true, Ordering::Relaxed);
-    }));
+    let other_thread_error = register_uncaptured_error_handler(&device);
 
     // Do something invalid on another thread.
     std::thread::scope(|s| {
         s.spawn(|| {
-            let _buffer = device.create_buffer(&wgpu::BufferDescriptor {
-                label: None,
-                size: 1 << 63, // Too large!
-                usage: wgpu::BufferUsages::COPY_SRC,
-                mapped_at_creation: false,
-            });
+            raise_validation_error(&device);
         });
     });
 
     // Pop the error scope on the main thread.
-    let error = pollster::block_on(device.pop_error_scope());
+    let error = pollster::block_on(scope.pop());
 
     // The main thread's error scope should not have captured the other thread's error.
     assert!(error.is_none());
 
     // The other thread's error should have been reported to the uncaptured error handler.
-    assert!(other_thread_error.load(Ordering::Relaxed));
+    let uncaptured_errors = other_thread_error.lock();
+    assert_eq!(uncaptured_errors.len(), 1);
+    assert_matches_string(&uncaptured_errors[0], ERR);
+}
+
+// Test that error scopes error when popped in the wrong order.
+#[test]
+#[should_panic(expected = "error scopes must be popped in reverse order")]
+fn pop_out_of_order() {
+    let (device, _queue) = wgpu::Device::noop(&wgpu::DeviceDescriptor::default());
+
+    let scope1 = device.push_error_scope(wgpu::ErrorFilter::Validation);
+    let _scope2 = device.push_error_scope(wgpu::ErrorFilter::Validation);
+
+    let _ = pollster::block_on(scope1.pop());
+}
+
+// Test that error scopes are automatically popped when dropped.
+#[test]
+fn drop_automatically_pops() {
+    let (device, _queue) = wgpu::Device::noop(&wgpu::DeviceDescriptor::default());
+
+    let uncaptured_error = register_uncaptured_error_handler(&device);
+
+    let scope = device.push_error_scope(wgpu::ErrorFilter::Validation);
+    raise_validation_error(&device);
+    drop(scope); // Automatically pops the error scope.
+
+    assert!(uncaptured_error.lock().is_empty());
+
+    // Raising another error will go to the uncaptured error handler, not the dropped scope.
+    raise_validation_error(&device);
+
+    assert_eq!(uncaptured_error.lock().len(), 1);
+    assert_matches_string(&uncaptured_error.lock()[0], ERR);
+}
+
+// Test that error scopes are automatically popped when dropped during unwinding,
+// even when they are dropped out of order.
+#[test]
+fn drop_during_unwind() {
+    let (device, _queue) = wgpu::Device::noop(&wgpu::DeviceDescriptor::default());
+
+    let scope1 = device.push_error_scope(wgpu::ErrorFilter::Validation);
+    let scope2 = device.push_error_scope(wgpu::ErrorFilter::Validation);
+
+    let res = std::panic::catch_unwind(AssertUnwindSafe(|| {
+        raise_validation_error(&device);
+        // Move scope1 so that it is dropped before scope2.
+        let _scope2 = scope2;
+        let _scope1 = scope1;
+        resume_unwind(Box::new("unwind"))
+    }));
+
+    assert_eq!(*res.unwrap_err().downcast_ref::<&str>().unwrap(), "unwind");
 }
diff --git a/wgpu/src/api/device.rs b/wgpu/src/api/device.rs
index b0c48ff7202..98786c8e1ea 100644
--- a/wgpu/src/api/device.rs
+++ b/wgpu/src/api/device.rs
@@ -1,7 +1,7 @@
 use alloc::{boxed::Box, string::String, sync::Arc, vec};
 #[cfg(wgpu_core)]
 use core::ops::Deref;
-use core::{error, fmt, future::Future};
+use core::{error, fmt, future::Future, marker::PhantomData};
 
 use crate::api::blas::{Blas, BlasGeometrySizeDescriptors, CreateBlasDescriptor};
 use crate::api::tlas::{CreateTlasDescriptor, Tlas};
@@ -214,7 +214,7 @@ impl Device {
         let encoder = self.inner.create_render_bundle_encoder(desc);
         RenderBundleEncoder {
             inner: encoder,
-            _p: core::marker::PhantomData,
+            _p: PhantomData,
         }
     }
 
@@ -410,10 +410,14 @@ impl Device {
         self.inner.on_uncaptured_error(handler)
     }
 
-    /// Push an error scope on this device's error scope stack.
-    /// All operations on this device, or on resources created from
-    /// this device, will have their errors captured by this scope
-    /// until a corresponding pop is made.
+    /// Push an error scope on this device's thread-local error scope
+    /// stack. All operations on this device, or on resources created
+    /// from this device, will have their errors captured by this scope
+    /// until the scope is popped.
+    ///
+    /// Scopes must be popped in the reverse order of their creation. If
+    /// a guard is dropped without being `pop()`ped, the scope will be
+    /// popped, and the captured errors will be dropped.
     ///
     /// Multiple error scopes may be active at one time, forming a stack.
     /// Each error will be reported to the inner-most scope that matches
@@ -421,21 +425,31 @@
     ///
     /// With the `std` feature enabled, this stack is **thread-local**.
    /// Without, this is **global** to all threads.
-    pub fn push_error_scope(&self, filter: ErrorFilter) {
-        self.inner.push_error_scope(filter)
-    }
-
-    /// Pop an error scope from this device's error scope stack. Returns
-    /// a future which resolves to the error captured by this scope, if any.
     ///
-    /// This will pop the most recently pushed error scope on this device.
-    ///
-    /// If there are no error scopes on this device, this will panic.
-    ///
-    /// With the `std` feature enabled, the error stack is **thread-local**.
-    /// Without, this is **global** to all threads.
-    pub fn pop_error_scope(&self) -> impl Future<Output = Option<Error>> + WasmNotSend {
-        self.inner.pop_error_scope()
+    /// ```rust
+    /// # async move {
+    /// # let device: wgpu::Device = unreachable!();
+    /// let error_scope = device.push_error_scope(wgpu::ErrorFilter::Validation);
+    ///
+    /// // ...
+    /// // do work that may produce validation errors
+    /// // ...
+    ///
+    /// // pop the error scope and get a future for the result
+    /// let error_future = error_scope.pop();
+    ///
+    /// // await the future to get the error, if any
+    /// let error = error_future.await;
+    /// # };
+    /// ```
+    pub fn push_error_scope(&self, filter: ErrorFilter) -> ErrorScopeGuard {
+        let index = self.inner.push_error_scope(filter);
+        ErrorScopeGuard {
+            device: self.inner.clone(),
+            index,
+            popped: false,
+            _phantom: PhantomData,
+        }
     }
 
     /// Starts a capture in the attached graphics debugger.
@@ -812,3 +826,43 @@ impl fmt::Display for Error {
         }
     }
 }
+
+/// Guard for an error scope pushed with [`Device::push_error_scope()`].
+///
+/// Call [`pop()`] to pop the scope and get a future for the result. If
+/// the guard is dropped without being popped explicitly, the scope will still be popped,
+/// and the captured errors will be dropped.
+///
+/// This guard is neither `Send` nor `Sync`, as error scopes are handled
+/// on a per-thread basis when the `std` feature is enabled.
+///
+/// [`pop()`]: ErrorScopeGuard::pop
+#[must_use = "Error scopes must be explicitly popped to retrieve errors they catch"]
+pub struct ErrorScopeGuard {
+    device: dispatch::DispatchDevice,
+    index: u32,
+    popped: bool,
+    // Ensure the guard is !Send and !Sync
+    _phantom: PhantomData<*mut ()>,
+}
+
+static_assertions::assert_not_impl_any!(ErrorScopeGuard: Send, Sync);
+
+impl ErrorScopeGuard {
+    /// Pops the error scope.
+    ///
+    /// Returns a future which resolves to the error captured by this scope, if any.
+    /// The pop takes effect immediately; the future does not need to be awaited before doing work that is outside of this error scope.
+    pub fn pop(mut self) -> impl Future<Output = Option<Error>> + WasmNotSend {
+        self.popped = true;
+        self.device.pop_error_scope(self.index)
+    }
+}
+
+impl Drop for ErrorScopeGuard {
+    fn drop(&mut self) {
+        if !self.popped {
+            drop(self.device.pop_error_scope(self.index));
+        }
+    }
+}
diff --git a/wgpu/src/backend/webgpu.rs b/wgpu/src/backend/webgpu.rs
index 24502f912fb..36e755ab209 100644
--- a/wgpu/src/backend/webgpu.rs
+++ b/wgpu/src/backend/webgpu.rs
@@ -15,8 +15,7 @@ use alloc::{
     vec::Vec,
 };
 use core::{
-    cell::OnceCell,
-    cell::RefCell,
+    cell::{Cell, OnceCell, RefCell},
     fmt,
     future::Future,
     ops::Range,
@@ -945,6 +944,7 @@ fn future_request_device(
         WebDevice {
             inner: device,
             ident: crate::cmp::Identifier::create(),
+            error_scope_count: Rc::new(Cell::new(0)),
         }
         .into(),
         WebQueue {
@@ -1168,6 +1168,8 @@ pub struct WebDevice {
     pub(crate) inner: webgpu_sys::GpuDevice,
     /// Unique identifier for this Device.
     ident: crate::cmp::Identifier,
+    /// Current number of error scopes that have been pushed on the device.
+    error_scope_count: Rc<Cell<u32>>,
 }
 
 #[derive(Debug, Clone)]
@@ -2480,15 +2482,36 @@ impl dispatch::DeviceInterface for WebDevice {
         f.forget();
     }
 
-    fn push_error_scope(&self, filter: crate::ErrorFilter) {
+    fn push_error_scope(&self, filter: crate::ErrorFilter) -> u32 {
+        let index = self.error_scope_count.get();
+        self.error_scope_count.set(
+            index
+                .checked_add(1)
+                .expect("Greater than 2^32 nested error scopes"),
+        );
         self.inner.push_error_scope(match filter {
             crate::ErrorFilter::OutOfMemory => webgpu_sys::GpuErrorFilter::OutOfMemory,
             crate::ErrorFilter::Validation => webgpu_sys::GpuErrorFilter::Validation,
             crate::ErrorFilter::Internal => webgpu_sys::GpuErrorFilter::Internal,
         });
+        index
     }
 
-    fn pop_error_scope(&self) -> Pin> {
+    fn pop_error_scope(&self, index: u32) -> Pin> {
+        let current_scope_count = self.error_scope_count.get();
+        let is_panicking = crate::util::is_panicking();
+        if current_scope_count == 0 && !is_panicking {
+            panic!("Mismatched pop_error_scope call: no error scope for this thread. Error scopes are thread-local.");
+        }
+        if index + 1 != current_scope_count && !is_panicking {
+            panic!(
+                "Mismatched pop_error_scope call: error scopes must be popped in reverse order."
+            );
+        }
+        // Decrement the error scope count. We've asserted that the current
+        // size is `index + 1` above.
+        self.error_scope_count.set(index);
+
         let error_promise = self.inner.pop_error_scope();
         Box::pin(MakeSendFuture::new(
             wasm_bindgen_futures::JsFuture::from(error_promise),
diff --git a/wgpu/src/backend/wgpu_core.rs b/wgpu/src/backend/wgpu_core.rs
index 90e20e44e4a..09db4dc0995 100644
--- a/wgpu/src/backend/wgpu_core.rs
+++ b/wgpu/src/backend/wgpu_core.rs
@@ -1803,22 +1803,58 @@ impl dispatch::DeviceInterface for CoreDevice {
         error_sink.uncaptured_handler = Some(handler);
     }
 
-    fn push_error_scope(&self, filter: crate::ErrorFilter) {
+    fn push_error_scope(&self, filter: crate::ErrorFilter) -> u32 {
         let mut error_sink = self.error_sink.lock();
         let thread_id = thread_id::ThreadId::current();
         let scopes = error_sink.scopes.entry(thread_id).or_default();
+        let index = scopes
+            .len()
+            .try_into()
+            .expect("Greater than 2^32 nested error scopes");
         scopes.push(ErrorScope {
             error: None,
             filter,
         });
+        index
     }
 
-    fn pop_error_scope(&self) -> Pin> {
+    fn pop_error_scope(&self, index: u32) -> Pin> {
         let mut error_sink = self.error_sink.lock();
+
+        // We go out of our way to avoid panicking while unwinding, because that would abort the process,
+        // and we are supposed to just drop the error scope on the floor.
+        let is_panicking = crate::util::is_panicking();
+
         let thread_id = thread_id::ThreadId::current();
         let err = "Mismatched pop_error_scope call: no error scope for this thread. Error scopes are thread-local.";
-        let scopes = error_sink.scopes.get_mut(&thread_id).expect(err);
-        let scope = scopes.pop().expect(err);
+        let scopes = match error_sink.scopes.get_mut(&thread_id) {
+            Some(s) => s,
+            None => {
+                if !is_panicking {
+                    panic!("{err}");
+                } else {
+                    return Box::pin(ready(None));
+                }
+            }
+        };
+        if scopes.is_empty() && !is_panicking {
+            panic!("{err}");
+        }
+        if index as usize != scopes.len() - 1 && !is_panicking {
+            panic!(
+                "Mismatched pop_error_scope call: error scopes must be popped in reverse order."
+            );
+        }
+
+        // It would be more correct to use `remove` here, so that during unwinding we would
+        // remove the error scope that actually belongs to this guard, but no such primitive
+        // exists on the web, and consistent behavior across backends matters more. If a panic
+        // unwinds the guards in the wrong order, slightly incorrect scope attribution is acceptable.
+        let scope = match scopes.pop() {
+            Some(s) => s,
+            None if !is_panicking => unreachable!(),
+            None => return Box::pin(ready(None)),
+        };
+
         Box::pin(ready(scope.error))
     }
diff --git a/wgpu/src/dispatch.rs b/wgpu/src/dispatch.rs
index 4534c1b9713..b4d0a599310 100644
--- a/wgpu/src/dispatch.rs
+++ b/wgpu/src/dispatch.rs
@@ -193,8 +193,9 @@ pub trait DeviceInterface: CommonTraits {
     fn set_device_lost_callback(&self, device_lost_callback: BoxDeviceLostCallback);
     fn on_uncaptured_error(&self, handler: Arc);
 
-    fn push_error_scope(&self, filter: crate::ErrorFilter);
-    fn pop_error_scope(&self) -> Pin>;
+    // Returns the index of the pushed error scope on the stack.
+    fn push_error_scope(&self, filter: crate::ErrorFilter) -> u32;
+    fn pop_error_scope(&self, index: u32) -> Pin>;
 
     unsafe fn start_graphics_debugger_capture(&self);
     unsafe fn stop_graphics_debugger_capture(&self);
diff --git a/wgpu/src/util/mod.rs b/wgpu/src/util/mod.rs
index 2f524fc74c2..9a4290278d1 100644
--- a/wgpu/src/util/mod.rs
+++ b/wgpu/src/util/mod.rs
@@ -11,6 +11,7 @@ mod device;
 mod encoder;
 mod init;
 mod mutex;
+mod panicking;
 mod texture_blitter;
 
 use alloc::{borrow::Cow, format, string::String, vec};
@@ -28,6 +29,7 @@ pub use wgt::{
 };
 
 pub(crate) use mutex::Mutex;
+pub(crate) use panicking::is_panicking;
 
 use crate::dispatch;
diff --git a/wgpu/src/util/panicking.rs b/wgpu/src/util/panicking.rs
new file mode 100644
index 00000000000..12ef903b80b
--- /dev/null
+++ b/wgpu/src/util/panicking.rs
@@ -0,0 +1,9 @@
+#[cfg(feature = "std")]
+pub fn is_panicking() -> bool {
+    std::thread::panicking()
+}
+
+#[cfg(not(feature = "std"))]
+pub fn is_panicking() -> bool {
+    false
+}
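For readers skimming the backend changes above: the index bookkeeping that `wgpu_core.rs` and `webgpu.rs` implement boils down to the small pattern sketched below. The names here (`ScopeStack`, `push`, `pop`) are hypothetical stand-ins, not wgpu API; the sketch only illustrates why returning the stack index from a push lets a later pop (or a `Drop` impl) verify that scopes are released in reverse order.

```rust
// Hypothetical, self-contained sketch of an index-checked scope stack.
use std::cell::RefCell;

#[derive(Default)]
struct ScopeStack {
    // One slot of captured "errors" per scope.
    scopes: RefCell<Vec<Option<String>>>,
}

impl ScopeStack {
    // Push returns the index of the new scope, mirroring `push_error_scope() -> u32`.
    fn push(&self) -> usize {
        let mut scopes = self.scopes.borrow_mut();
        scopes.push(None);
        scopes.len() - 1
    }

    // Pop checks that the caller holds the top-most scope, mirroring the
    // "must be popped in reverse order" panic in the real backends.
    fn pop(&self, index: usize) -> Option<String> {
        let mut scopes = self.scopes.borrow_mut();
        assert_eq!(
            index + 1,
            scopes.len(),
            "error scopes must be popped in reverse order"
        );
        scopes.pop().flatten()
    }
}

fn main() {
    let stack = ScopeStack::default();
    let outer = stack.push();
    let inner = stack.push();
    // Popping `inner` first is fine; popping `outer` first would trip the assert.
    let _ = stack.pop(inner);
    let _ = stack.pop(outer);
}
```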