diff --git a/wgpu-core/src/device/resource.rs b/wgpu-core/src/device/resource.rs
index 176995a3a06..eec982e8005 100644
--- a/wgpu-core/src/device/resource.rs
+++ b/wgpu-core/src/device/resource.rs
@@ -9,7 +9,7 @@ use core::{
     fmt,
     mem::{self, ManuallyDrop},
     num::NonZeroU32,
-    sync::atomic::{AtomicBool, Ordering},
+    sync::atomic::{AtomicBool, AtomicU32, Ordering},
 };
 
 use hal::ShouldBeNonZeroExt;
@@ -278,6 +278,8 @@ pub struct Device {
 pub(crate) enum DeferredDestroy {
     TextureViews(WeakVec<TextureView>),
     BindGroups(WeakVec<BindGroup>),
+    Buffer(Weak<Buffer>),
+    Texture(Weak<Texture>),
 }
 
 impl fmt::Debug for Device {
@@ -705,6 +707,18 @@ impl Device {
                     }
                 }
             }
+            DeferredDestroy::Buffer(buffer) => {
+                // Call destroy() now that we're in a safe context (no locks held).
+                if let Some(buffer) = buffer.upgrade() {
+                    buffer.destroy();
+                }
+            }
+            DeferredDestroy::Texture(texture) => {
+                // Call destroy() now that we're in a safe context (no locks held).
+                if let Some(texture) = texture.upgrade() {
+                    texture.destroy();
+                }
+            }
         }
     }
 }
@@ -1051,6 +1065,8 @@ impl Device {
             bind_groups: Mutex::new(rank::BUFFER_BIND_GROUPS, WeakVec::new()),
             timestamp_normalization_bind_group,
             indirect_validation_bind_groups,
+            destroyed: AtomicBool::new(false),
+            in_flight_count: AtomicU32::new(0),
         };
 
         let buffer = Arc::new(buffer);
@@ -1234,6 +1250,8 @@ impl Device {
             bind_groups: Mutex::new(rank::BUFFER_BIND_GROUPS, WeakVec::new()),
             timestamp_normalization_bind_group,
             indirect_validation_bind_groups,
+            destroyed: AtomicBool::new(false),
+            in_flight_count: AtomicU32::new(0),
         };
 
         let buffer = Arc::new(buffer);
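Note on the two new DeferredDestroy variants: the tracker Drop impls later in this
patch can run while resource-management locks are held, so they cannot call
destroy() inline. Instead they push a Weak reference onto the device's
deferred_destroy queue, which is drained later from a context with no locks held.
A minimal sketch of that queue pattern, with illustrative names (Resource,
DeferredQueue, flush) rather than wgpu-core's actual API:

    use std::sync::{Arc, Mutex, Weak};

    struct GpuObject;

    struct Resource {
        raw: Mutex<Option<GpuObject>>,
    }

    impl Resource {
        /// Frees the underlying object; must only run with no locks held.
        fn destroy(self: &Arc<Self>) {
            drop(self.raw.lock().unwrap().take());
        }
    }

    #[derive(Default)]
    struct DeferredQueue {
        pending: Mutex<Vec<Weak<Resource>>>,
    }

    impl DeferredQueue {
        /// Callable from contexts that may hold locks: only records the work.
        fn defer(&self, resource: &Arc<Resource>) {
            self.pending.lock().unwrap().push(Arc::downgrade(resource));
        }

        /// Drained later from a safe context, mirroring the match arms above.
        fn flush(&self) {
            let pending = std::mem::take(&mut *self.pending.lock().unwrap());
            for weak in pending {
                // A resource dropped in the meantime cleans up in its own Drop.
                if let Some(resource) = weak.upgrade() {
                    resource.destroy();
                }
            }
        }
    }

Taking the whole Vec out under the lock and iterating afterwards is what keeps the
destroy() calls lock-free.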
diff --git a/wgpu-core/src/resource.rs b/wgpu-core/src/resource.rs
index b5290f2605e..fe6c3c2eed1 100644
--- a/wgpu-core/src/resource.rs
+++ b/wgpu-core/src/resource.rs
@@ -6,6 +6,7 @@ use core::{
     num::NonZeroU64,
     ops::Range,
     ptr::NonNull,
+    sync::atomic::{AtomicBool, AtomicU32, Ordering},
 };
 use smallvec::SmallVec;
 use thiserror::Error;
@@ -438,6 +439,14 @@ pub struct Buffer {
     pub(crate) bind_groups: Mutex<WeakVec<BindGroup>>,
     pub(crate) timestamp_normalization_bind_group: Snatchable<TimestampNormalizationBindGroup>,
     pub(crate) indirect_validation_bind_groups: Snatchable<crate::indirect_validation::BindGroups>,
+    /// Set to true when destroy() is called. This makes check_destroyed() fail immediately,
+    /// but the actual raw resource cleanup may be deferred if in_flight_count > 0.
+    pub(crate) destroyed: AtomicBool,
+    /// Count of command buffer trackers that currently hold a reference to this buffer.
+    /// Incremented when inserted into a command buffer's tracker, decremented when the
+    /// tracker is cleared (on command buffer submission or drop).
+    /// When destroy() is called and this is > 0, raw resource cleanup is deferred to drop().
+    pub(crate) in_flight_count: AtomicU32,
 }
 
 impl Drop for Buffer {
@@ -472,6 +481,11 @@ impl Buffer {
         &self,
         guard: &SnatchGuard,
     ) -> Result<(), DestroyedResourceError> {
+        // Check the destroyed flag first - this is set by destroy() even if
+        // the raw resource hasn't been snatched yet (deferred cleanup case).
+        if self.destroyed.load(Ordering::Acquire) {
+            return Err(DestroyedResourceError(self.error_ident()));
+        }
         self.raw
             .get(guard)
             .map(|_| ())
@@ -903,8 +917,23 @@ impl Buffer {
     }
 
     pub fn destroy(self: &Arc<Self>) {
+        // Mark as destroyed. This makes check_destroyed() fail,
+        // so any future use of this buffer will return an error.
+        self.destroyed.store(true, Ordering::Release);
+
         let device = &self.device;
 
+        // Check if this buffer is currently in any command buffer's tracker.
+        // If so, we should NOT snatch the raw resource yet - the tracker's Drop
+        // will call destroy() again when the count reaches 0.
+        if self.in_flight_count.load(Ordering::Acquire) > 0 {
+            // Buffer is in a recording command buffer.
+            // The raw resource will be cleaned up when the command buffer is
+            // submitted/dropped and the tracker calls destroy() again.
+            return;
+        }
+
+        // Not in any command buffer tracker, safe to snatch and cleanup now.
         let temp = {
             let mut snatch_guard = device.snatchable_lock.write();
 
@@ -1255,6 +1284,14 @@ pub struct Texture {
     pub(crate) clear_mode: RwLock<TextureClearMode>,
     pub(crate) views: Mutex<WeakVec<TextureView>>,
     pub(crate) bind_groups: Mutex<WeakVec<BindGroup>>,
+    /// Set to true when destroy() is called. This makes check_destroyed() fail immediately,
+    /// but the actual raw resource cleanup may be deferred if in_flight_count > 0.
+    pub(crate) destroyed: AtomicBool,
+    /// Count of command buffer trackers that currently hold a reference to this texture.
+    /// Incremented when inserted into a command buffer's tracker, decremented when the
+    /// tracker is cleared (on command buffer submission or drop).
+    /// When destroy() is called and this is > 0, raw resource cleanup is deferred to drop().
+    pub(crate) in_flight_count: AtomicU32,
 }
 
 impl Texture {
@@ -1290,6 +1327,8 @@ impl Texture {
             clear_mode: RwLock::new(rank::TEXTURE_CLEAR_MODE, clear_mode),
             views: Mutex::new(rank::TEXTURE_VIEWS, WeakVec::new()),
             bind_groups: Mutex::new(rank::TEXTURE_BIND_GROUPS, WeakVec::new()),
+            destroyed: AtomicBool::new(false),
+            in_flight_count: AtomicU32::new(0),
         }
     }
 
@@ -1353,6 +1392,19 @@ impl RawResourceAccess for Texture {
     fn raw<'a>(&'a self, guard: &'a SnatchGuard) -> Option<&'a Self::DynResource> {
         self.inner.get(guard).map(|t| t.raw())
     }
+
+    fn try_raw<'a>(
+        &'a self,
+        guard: &'a SnatchGuard,
+    ) -> Result<&'a Self::DynResource, DestroyedResourceError> {
+        // Check the destroyed flag first - this is set by destroy() even if
+        // the raw resource hasn't been snatched yet (deferred cleanup case).
+        if self.destroyed.load(Ordering::Acquire) {
+            return Err(DestroyedResourceError(self.error_ident()));
+        }
+        self.raw(guard)
+            .ok_or_else(|| DestroyedResourceError(self.error_ident()))
+    }
 }
 
 impl Texture {
@@ -1360,6 +1412,11 @@ impl Texture {
         &'a self,
         guard: &'a SnatchGuard,
     ) -> Result<&'a TextureInner, DestroyedResourceError> {
+        // Check the destroyed flag first - this is set by destroy() even if
+        // the raw resource hasn't been snatched yet (deferred cleanup case).
+        if self.destroyed.load(Ordering::Acquire) {
+            return Err(DestroyedResourceError(self.error_ident()));
+        }
         self.inner
             .get(guard)
             .ok_or_else(|| DestroyedResourceError(self.error_ident()))
@@ -1369,6 +1426,11 @@ impl Texture {
         &self,
         guard: &SnatchGuard,
     ) -> Result<(), DestroyedResourceError> {
+        // Check the destroyed flag first - this is set by destroy() even if
+        // the raw resource hasn't been snatched yet (deferred cleanup case).
+        if self.destroyed.load(Ordering::Acquire) {
+            return Err(DestroyedResourceError(self.error_ident()));
+        }
         self.inner
             .get(guard)
             .map(|_| ())
@@ -1405,8 +1467,23 @@ impl Texture {
     }
 
     pub fn destroy(self: &Arc<Self>) {
+        // Mark as destroyed. This makes check_destroyed() fail,
+        // so any future use of this texture will return an error.
+        self.destroyed.store(true, Ordering::Release);
+
         let device = &self.device;
 
+        // Check if this texture is currently in any command buffer's tracker.
+        // If so, we should NOT snatch the raw resource yet - the tracker's Drop
+        // will call destroy() again when the count reaches 0.
+        if self.in_flight_count.load(Ordering::Acquire) > 0 {
+            // Texture is in a recording command buffer.
+            // The raw resource will be cleaned up when the command buffer is
+            // submitted/dropped and the tracker calls destroy() again.
+            return;
+        }
+
+        // Not in any command buffer tracker, safe to snatch and cleanup now.
         let temp = {
             let raw = match self.inner.snatch(&mut device.snatchable_lock.write()) {
                 Some(TextureInner::Native { raw }) => raw,
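The key design point in this file is the split between "logically destroyed" and
"raw resource freed": the destroyed flag flips immediately so validation fails
fast, while the snatch is deferred until no command buffer tracker still holds the
resource. A condensed sketch of that state machine, using a hypothetical Handle
type and free_raw() placeholder rather than the real Buffer/Texture internals:

    use std::sync::atomic::{AtomicBool, AtomicU32, Ordering};

    struct Handle {
        destroyed: AtomicBool,      // set immediately by destroy()
        in_flight_count: AtomicU32, // trackers currently referencing this handle
    }

    impl Handle {
        fn destroy(&self) {
            // Validation sees the flag at once...
            self.destroyed.store(true, Ordering::Release);
            // ...but the raw resource is freed only once no recording
            // command buffer still tracks the handle.
            if self.in_flight_count.load(Ordering::Acquire) == 0 {
                self.free_raw();
            }
        }

        fn check_destroyed(&self) -> Result<(), &'static str> {
            if self.destroyed.load(Ordering::Acquire) {
                return Err("resource was destroyed");
            }
            Ok(())
        }

        fn free_raw(&self) { /* snatch and drop the hal resource */ }
    }

The Release store paired with the Acquire loads guarantees that a thread observing
destroyed == true also observes everything destroy() wrote before setting the flag.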
diff --git a/wgpu-core/src/track/buffer.rs b/wgpu-core/src/track/buffer.rs
index 1677765604e..ae50b50abf3 100644
--- a/wgpu-core/src/track/buffer.rs
+++ b/wgpu-core/src/track/buffer.rs
@@ -8,6 +8,7 @@ use alloc::{
     sync::{Arc, Weak},
     vec::Vec,
 };
+use core::sync::atomic::Ordering;
 
 use hal::BufferBarrier;
 use wgt::{strict_assert, strict_assert_eq, BufferUses};
@@ -530,6 +531,11 @@ impl BufferTracker {
         let currently_owned = unsafe { self.metadata.contains_unchecked(index) };
 
         if !currently_owned {
+            // Increment the in_flight_count for the buffer being inserted.
+            // This is decremented when the tracker is dropped.
+            let buffer = unsafe { metadata_provider.get(index) };
+            buffer.in_flight_count.fetch_add(1, Ordering::Release);
+
             unsafe {
                 insert(
                     Some(&mut self.start),
@@ -552,6 +558,25 @@ impl BufferTracker {
     }
 }
 
+impl Drop for BufferTracker {
+    fn drop(&mut self) {
+        // Decrement in_flight_count for all buffers in this tracker.
+        // If this was the last command buffer using a destroyed buffer,
+        // queue it for deferred destruction (we can't call destroy() directly
+        // here as it might cause lock recursion).
+        for buffer in self.metadata.owned_resources() {
+            let prev_count = buffer.in_flight_count.fetch_sub(1, Ordering::Release);
+            if prev_count == 1 && buffer.destroyed.load(Ordering::Acquire) {
+                // This was the last command buffer using this destroyed buffer.
+                // Queue it for deferred destruction.
+                buffer.device.deferred_destroy.lock().push(
+                    crate::device::resource::DeferredDestroy::Buffer(Arc::downgrade(buffer)),
+                );
+            }
+        }
+    }
+}
+
 /// Stores all buffer state within a device.
 pub(crate) struct DeviceBufferTracker {
     current_states: Vec<BufferUses>,
diff --git a/wgpu-core/src/track/texture.rs b/wgpu-core/src/track/texture.rs
index efdf0c356df..4b2f1f61fec 100644
--- a/wgpu-core/src/track/texture.rs
+++ b/wgpu-core/src/track/texture.rs
@@ -38,7 +38,7 @@ use alloc::{
     sync::{Arc, Weak},
     vec::{Drain, Vec},
 };
-use core::iter;
+use core::{iter, sync::atomic::Ordering};
 
 impl ResourceUses for TextureUses {
     const EXCLUSIVE: Self = Self::EXCLUSIVE;
@@ -667,6 +667,25 @@ impl TextureTrackerSetSingle for TextureTracker {
     }
 }
 
+impl Drop for TextureTracker {
+    fn drop(&mut self) {
+        // Decrement in_flight_count for all textures in this tracker.
+        // If this was the last command buffer using a destroyed texture,
+        // queue it for deferred destruction (we can't call destroy() directly
+        // here as it might cause lock recursion).
+        for texture in self.metadata.owned_resources() {
+            let prev_count = texture.in_flight_count.fetch_sub(1, Ordering::Release);
+            if prev_count == 1 && texture.destroyed.load(Ordering::Acquire) {
+                // This was the last command buffer using this destroyed texture.
+                // Queue it for deferred destruction.
+                texture.device.deferred_destroy.lock().push(
+                    crate::device::resource::DeferredDestroy::Texture(Arc::downgrade(texture)),
+                );
+            }
+        }
+    }
+}
+
 /// Stores all texture state within a device.
 pub(crate) struct DeviceTextureTracker {
     current_state_set: TextureStateSet,
@@ -1038,6 +1057,11 @@ unsafe fn insert_or_barrier_update(
     let currently_owned = unsafe { resource_metadata.contains_unchecked(index) };
 
     if !currently_owned {
+        // Increment the in_flight_count for the texture being inserted.
+        // This is decremented when the tracker is dropped.
+        let texture = unsafe { metadata_provider.get(index) };
+        texture.in_flight_count.fetch_add(1, Ordering::Release);
+
         unsafe {
             insert(
                 Some(texture_selector),
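A subtlety in these two Drop impls: fetch_sub returns the value *before* the
subtraction, so prev_count == 1 identifies exactly the decrement that took the
count to zero. A stripped-down sketch of that "last one out" test, as a
hypothetical free function with the same orderings:

    use std::sync::atomic::{AtomicBool, AtomicU32, Ordering};

    /// Returns true exactly when this call released the final in-flight
    /// reference to a resource that destroy() has already flagged.
    fn release_in_flight(count: &AtomicU32, destroyed: &AtomicBool) -> bool {
        let prev = count.fetch_sub(1, Ordering::Release);
        debug_assert!(prev > 0, "unbalanced in_flight_count");
        prev == 1 && destroyed.load(Ordering::Acquire)
    }

Routing the cleanup through the deferred_destroy queue rather than calling
destroy() here keeps the Drop impls from re-entering the snatchable lock.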
diff --git a/wgpu-hal/src/metal/mod.rs b/wgpu-hal/src/metal/mod.rs
index 0d404c146cf..9f16bd6e7f4 100644
--- a/wgpu-hal/src/metal/mod.rs
+++ b/wgpu-hal/src/metal/mod.rs
@@ -323,19 +323,11 @@ struct PrivateDisabilities {
     broken_layered_clear_image: bool,
 }
 
-#[derive(Debug)]
+#[derive(Debug, Default)]
 struct Settings {
     retain_command_buffer_references: bool,
 }
 
-impl Default for Settings {
-    fn default() -> Self {
-        Self {
-            retain_command_buffer_references: true,
-        }
-    }
-}
-
 struct AdapterShared {
     device: Mutex<metal::Device>,
     disabilities: PrivateDisabilities,
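One behavioral note on the last hunk that is easy to miss in review: the removed
hand-written impl defaulted retain_command_buffer_references to true, while the
derived impl uses bool::default(), i.e. false. That flip appears intentional:
with destruction now deferred while command buffers are in flight, wgpu-core keeps
the hal objects alive itself, so the Metal backend presumably no longer needs to
retain references on every command buffer. A standalone check of the derive's
behavior (not wgpu-hal code):

    #[derive(Debug, Default, PartialEq)]
    struct Settings {
        retain_command_buffer_references: bool,
    }

    fn main() {
        // The derive fills each field with its own Default; for bool that is
        // false, flipping the old hand-written default of true.
        assert_eq!(
            Settings::default(),
            Settings { retain_command_buffer_references: false }
        );
    }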