From ae9d12da6afe9c3316d88198e4cf0946666d5e72 Mon Sep 17 00:00:00 2001
From: Nicolas Silva
Date: Mon, 18 Dec 2023 10:37:59 +0100
Subject: [PATCH 1/2] Simplify some code around buffer unmapping

---
 wgpu-core/src/device/global.rs | 96 +++++++++++++++++-----------------
 wgpu-core/src/resource.rs      | 70 ++++++++++++-------------
 2 files changed, 80 insertions(+), 86 deletions(-)

diff --git a/wgpu-core/src/device/global.rs b/wgpu-core/src/device/global.rs
index ded8c41da8..54627cd650 100644
--- a/wgpu-core/src/device/global.rs
+++ b/wgpu-core/src/device/global.rs
@@ -486,10 +486,15 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
 
         let hub = A::hub(self);
 
-        let mut buffer_guard = hub.buffers.write();
-        let buffer = buffer_guard
-            .get_and_mark_destroyed(buffer_id)
-            .map_err(|_| resource::DestroyError::Invalid)?;
+        let buffer = {
+            let mut buffer_guard = hub.buffers.write();
+            buffer_guard
+                .get_and_mark_destroyed(buffer_id)
+                .map_err(|_| resource::DestroyError::Invalid)?
+        };
+
+        let _ = buffer.unmap();
+
         buffer.destroy()
     }
 
@@ -499,37 +504,40 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
 
         let hub = A::hub(self);
 
-        if let Some(buffer) = hub.buffers.unregister(buffer_id) {
-            if buffer.ref_count() == 1 {
-                buffer.destroy().ok();
+        let buffer = match hub.buffers.unregister(buffer_id) {
+            Some(buffer) => buffer,
+            None => {
+                return;
             }
+        };
 
-            let last_submit_index = buffer.info.submission_index();
+        let _ = buffer.unmap();
 
-            let device = buffer.device.clone();
+        let last_submit_index = buffer.info.submission_index();
 
-            if device
-                .pending_writes
-                .lock()
-                .as_ref()
-                .unwrap()
-                .dst_buffers
-                .contains_key(&buffer_id)
-            {
-                device.lock_life().future_suspected_buffers.push(buffer);
-            } else {
-                device
-                    .lock_life()
-                    .suspected_resources
-                    .buffers
-                    .insert(buffer_id, buffer);
-            }
+        let device = buffer.device.clone();
 
-            if wait {
-                match device.wait_for_submit(last_submit_index) {
-                    Ok(()) => (),
-                    Err(e) => log::error!("Failed to wait for buffer {:?}: {}", buffer_id, e),
-                }
+        if device
+            .pending_writes
+            .lock()
+            .as_ref()
+            .unwrap()
+            .dst_buffers
+            .contains_key(&buffer_id)
+        {
+            device.lock_life().future_suspected_buffers.push(buffer);
+        } else {
+            device
+                .lock_life()
+                .suspected_resources
+                .buffers
+                .insert(buffer_id, buffer);
+        }
+
+        if wait {
+            match device.wait_for_submit(last_submit_index) {
+                Ok(()) => (),
+                Err(e) => log::error!("Failed to wait for buffer {:?}: {}", buffer_id, e),
             }
         }
     }
@@ -2503,27 +2511,17 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
         profiling::scope!("unmap", "Buffer");
         api_log!("Buffer::unmap {buffer_id:?}");
 
-        let closure;
-        {
-            // Restrict the locks to this scope.
-            let hub = A::hub(self);
+        let hub = A::hub(self);
 
-            let buffer = hub
-                .buffers
-                .get(buffer_id)
-                .map_err(|_| BufferAccessError::Invalid)?;
-            if !buffer.device.is_valid() {
-                return Err(DeviceError::Lost.into());
-            }
-            closure = buffer.buffer_unmap_inner()
-        }
+        let buffer = hub
+            .buffers
+            .get(buffer_id)
+            .map_err(|_| BufferAccessError::Invalid)?;
 
-        // Note: outside the scope where locks are held when calling the callback
-        if let Some((mut operation, status)) = closure? {
-            if let Some(callback) = operation.callback.take() {
-                callback.call(status);
-            }
+        if !buffer.device.is_valid() {
+            return Err(DeviceError::Lost.into());
         }
-        Ok(())
+
+        buffer.unmap()
     }
 }
diff --git a/wgpu-core/src/resource.rs b/wgpu-core/src/resource.rs
index a1a35b2f32..c9857c1f30 100644
--- a/wgpu-core/src/resource.rs
+++ b/wgpu-core/src/resource.rs
@@ -427,9 +427,18 @@ impl<A: HalApi> Buffer<A> {
         self.raw.get(guard).unwrap()
     }
 
-    pub(crate) fn buffer_unmap_inner(
-        self: &Arc<Self>,
-    ) -> Result<Option<BufferMapPendingClosure>, BufferAccessError> {
+    // Note: This must not be called while holding a lock.
+    pub(crate) fn unmap(self: &Arc<Self>) -> Result<(), BufferAccessError> {
+        if let Some((mut operation, status)) = self.unmap_inner()? {
+            if let Some(callback) = operation.callback.take() {
+                callback.call(status);
+            }
+        }
+
+        Ok(())
+    }
+
+    fn unmap_inner(self: &Arc<Self>) -> Result<Option<BufferMapPendingClosure>, BufferAccessError> {
         use hal::Device;
 
         let device = &self.device;
@@ -537,43 +546,30 @@ impl<A: HalApi> Buffer<A> {
     }
 
     pub(crate) fn destroy(self: &Arc<Self>) -> Result<(), DestroyError> {
-        let map_closure;
-        // Restrict the locks to this scope.
-        {
-            let device = &self.device;
-            let buffer_id = self.info.id();
-
-            map_closure = self.buffer_unmap_inner().unwrap_or(None);
-
-            #[cfg(feature = "trace")]
-            if let Some(ref mut trace) = *device.trace.lock() {
-                trace.add(trace::Action::FreeBuffer(buffer_id));
-            }
-            // Note: a future commit will replace this with a read guard
-            // and actually snatch the buffer.
-            let snatch_guard = device.snatchable_lock.read();
-            if self.raw.get(&snatch_guard).is_none() {
-                return Err(resource::DestroyError::AlreadyDestroyed);
-            }
+        let device = &self.device;
+        let buffer_id = self.info.id();
 
-            let temp = queue::TempResource::Buffer(self.clone());
-            let mut pending_writes = device.pending_writes.lock();
-            let pending_writes = pending_writes.as_mut().unwrap();
-            if pending_writes.dst_buffers.contains_key(&buffer_id) {
-                pending_writes.temp_resources.push(temp);
-            } else {
-                let last_submit_index = self.info.submission_index();
-                device
-                    .lock_life()
-                    .schedule_resource_destruction(temp, last_submit_index);
-            }
+        #[cfg(feature = "trace")]
+        if let Some(ref mut trace) = *device.trace.lock() {
+            trace.add(trace::Action::FreeBuffer(buffer_id));
+        }
+        // Note: a future commit will replace this with a read guard
+        // and actually snatch the buffer.
+        let snatch_guard = device.snatchable_lock.read();
+        if self.raw.get(&snatch_guard).is_none() {
+            return Err(resource::DestroyError::AlreadyDestroyed);
         }
-
-        // Note: outside the scope where locks are held when calling the callback
-        if let Some((mut operation, status)) = map_closure {
-            if let Some(callback) = operation.callback.take() {
-                callback.call(status);
-            }
-        }
+
+        let temp = queue::TempResource::Buffer(self.clone());
+        let mut pending_writes = device.pending_writes.lock();
+        let pending_writes = pending_writes.as_mut().unwrap();
+        if pending_writes.dst_buffers.contains_key(&buffer_id) {
+            pending_writes.temp_resources.push(temp);
+        } else {
+            let last_submit_index = self.info.submission_index();
+            device
+                .lock_life()
+                .schedule_resource_destruction(temp, last_submit_index);
+        }
 
         Ok(())
     }

From e8b611d07c4f0652d1095a5181df14f422a3945b Mon Sep 17 00:00:00 2001
From: Nicolas Silva
Date: Fri, 15 Dec 2023 11:11:56 +0100
Subject: [PATCH 2/2] Snatch raw buffers when destroying them

---
 wgpu-core/src/device/life.rs  | 44 +++++++++++++++++++++++++-
 wgpu-core/src/device/queue.rs |  5 +--
 wgpu-core/src/resource.rs     | 59 ++++++++++++++++++++++++++++++-----
 3 files changed, 97 insertions(+), 11 deletions(-)

diff --git a/wgpu-core/src/device/life.rs b/wgpu-core/src/device/life.rs
index 1daba3a6b8..49bd7ebb5d 100644
--- a/wgpu-core/src/device/life.rs
+++ b/wgpu-core/src/device/life.rs
@@ -14,7 +14,10 @@ use crate::{
         TextureViewId,
     },
     pipeline::{ComputePipeline, RenderPipeline},
-    resource::{self, Buffer, QuerySet, Resource, Sampler, StagingBuffer, Texture, TextureView},
+    resource::{
+        self, Buffer, DestroyedBuffer, QuerySet, Resource, Sampler, StagingBuffer, Texture,
+        TextureView,
+    },
     track::{ResourceTracker, Tracker},
     FastHashMap, SubmissionIndex,
 };
@@ -39,6 +42,7 @@ pub(crate) struct ResourceMaps<A: HalApi> {
     pub pipeline_layouts: FastHashMap<PipelineLayoutId, Arc<PipelineLayout<A>>>,
     pub render_bundles: FastHashMap<RenderBundleId, Arc<RenderBundle<A>>>,
     pub query_sets: FastHashMap<QuerySetId, Arc<QuerySet<A>>>,
+    pub destroyed_buffers: FastHashMap<BufferId, Arc<DestroyedBuffer<A>>>,
 }
 
 impl<A: HalApi> ResourceMaps<A> {
@@ -56,6 +60,7 @@ impl<A: HalApi> ResourceMaps<A> {
             pipeline_layouts: FastHashMap::default(),
             render_bundles: FastHashMap::default(),
             query_sets: FastHashMap::default(),
+            destroyed_buffers: FastHashMap::default(),
         }
     }
 
@@ -73,6 +78,7 @@ impl<A: HalApi> ResourceMaps<A> {
             pipeline_layouts,
             render_bundles,
             query_sets,
+            destroyed_buffers,
         } = self;
         buffers.clear();
         staging_buffers.clear();
@@ -86,6 +92,7 @@ impl<A: HalApi> ResourceMaps<A> {
         pipeline_layouts.clear();
         render_bundles.clear();
         query_sets.clear();
+        destroyed_buffers.clear();
     }
 
     pub(crate) fn extend(&mut self, mut other: Self) {
@@ -102,6 +109,7 @@ impl<A: HalApi> ResourceMaps<A> {
             pipeline_layouts,
             render_bundles,
             query_sets,
+            destroyed_buffers,
         } = self;
         buffers.extend(other.buffers.drain());
         staging_buffers.extend(other.staging_buffers.drain());
@@ -115,6 +123,7 @@ impl<A: HalApi> ResourceMaps<A> {
         pipeline_layouts.extend(other.pipeline_layouts.drain());
         render_bundles.extend(other.render_bundles.drain());
         query_sets.extend(other.query_sets.drain());
+        destroyed_buffers.extend(other.destroyed_buffers.drain());
     }
 }
 
@@ -281,6 +290,11 @@ impl<A: HalApi> LifetimeTracker<A> {
                         .staging_buffers
                         .insert(raw.as_info().id(), raw);
                 }
+                TempResource::DestroyedBuffer(destroyed) => {
+                    last_resources
+                        .destroyed_buffers
+                        .insert(destroyed.id, destroyed);
+                }
                 TempResource::Texture(raw) => {
                     last_resources.textures.insert(raw.as_info().id(), raw);
                 }
@@ -384,6 +398,9 @@ impl<A: HalApi> LifetimeTracker<A> {
             TempResource::StagingBuffer(raw) => {
                 resources.staging_buffers.insert(raw.as_info().id(), raw);
             }
+            TempResource::DestroyedBuffer(destroyed) => {
+                resources.destroyed_buffers.insert(destroyed.id, destroyed);
+            }
             TempResource::Texture(raw) => {
                 resources.textures.insert(raw.as_info().id(), raw);
             }
@@ -646,6 +663,27 @@ impl<A: HalApi> LifetimeTracker<A> {
         self
     }
 
+    fn triage_suspected_destroyed_buffers(
+        &mut self,
+        #[cfg(feature = "trace")] trace: &mut Option<&mut trace::Trace>,
+    ) {
+        for (id, buffer) in self.suspected_resources.destroyed_buffers.drain() {
+            let submit_index = buffer.submission_index;
+            if let Some(resources) = self.active.iter_mut().find(|a| a.index == submit_index) {
+                resources
+                    .last_resources
+                    .destroyed_buffers
+                    .insert(id, buffer);
+            } else {
+                self.free_resources.destroyed_buffers.insert(id, buffer);
+            }
+            #[cfg(feature = "trace")]
+            if let Some(ref mut t) = *trace {
+                t.add(trace::Action::DestroyBuffer(id));
+            }
+        }
+    }
+
     fn triage_suspected_compute_pipelines(
         &mut self,
         trackers: &Mutex<Tracker<A>>,
@@ -875,6 +913,10 @@ impl<A: HalApi> LifetimeTracker<A> {
             #[cfg(feature = "trace")]
             &mut trace,
         );
+        self.triage_suspected_destroyed_buffers(
+            #[cfg(feature = "trace")]
+            &mut trace,
+        );
     }
 
     /// Determine which buffers are ready to map, and which must wait for the
diff --git a/wgpu-core/src/device/queue.rs b/wgpu-core/src/device/queue.rs
index 2a7e4a77db..5dff3a19ab 100644
--- a/wgpu-core/src/device/queue.rs
+++ b/wgpu-core/src/device/queue.rs
@@ -16,8 +16,8 @@ use crate::{
     identity::{GlobalIdentityHandlerFactory, Input},
     init_tracker::{has_copy_partial_init_tracker_coverage, TextureInitRange},
     resource::{
-        Buffer, BufferAccessError, BufferMapState, Resource, ResourceInfo, ResourceType,
-        StagingBuffer, Texture, TextureInner,
+        Buffer, BufferAccessError, BufferMapState, DestroyedBuffer, Resource, ResourceInfo,
+        ResourceType, StagingBuffer, Texture, TextureInner,
     },
     resource_log, track, FastHashMap, SubmissionIndex,
 };
@@ -163,6 +163,7 @@ pub struct WrappedSubmissionIndex {
 pub enum TempResource<A: HalApi> {
     Buffer(Arc<Buffer<A>>),
     StagingBuffer(Arc<StagingBuffer<A>>),
+    DestroyedBuffer(Arc<DestroyedBuffer<A>>),
     Texture(Arc<Texture<A>>),
 }
 
diff --git a/wgpu-core/src/resource.rs b/wgpu-core/src/resource.rs
index c9857c1f30..9223df6428 100644
--- a/wgpu-core/src/resource.rs
+++ b/wgpu-core/src/resource.rs
@@ -413,7 +413,7 @@ pub struct Buffer<A: HalApi> {
 impl<A: HalApi> Drop for Buffer<A> {
     fn drop(&mut self) {
         if let Some(raw) = self.raw.take() {
-            resource_log!("Destroy raw Buffer {:?}", self.info.label());
+            resource_log!("Deallocate raw Buffer (dropped) {:?}", self.info.label());
             unsafe {
                 use hal::Device;
                 self.device.raw().destroy_buffer(raw);
@@ -553,14 +553,25 @@ impl<A: HalApi> Buffer<A> {
         if let Some(ref mut trace) = *device.trace.lock() {
             trace.add(trace::Action::FreeBuffer(buffer_id));
         }
-        // Note: a future commit will replace this with a read guard
-        // and actually snatch the buffer.
-        let snatch_guard = device.snatchable_lock.read();
-        if self.raw.get(&snatch_guard).is_none() {
-            return Err(resource::DestroyError::AlreadyDestroyed);
-        }
 
-        let temp = queue::TempResource::Buffer(self.clone());
+        let temp = {
+            let snatch_guard = device.snatchable_lock.write();
+            let raw = match self.raw.snatch(snatch_guard) {
+                Some(raw) => raw,
+                None => {
+                    return Err(resource::DestroyError::AlreadyDestroyed);
+                }
+            };
+
+            queue::TempResource::DestroyedBuffer(Arc::new(DestroyedBuffer {
+                raw: Some(raw),
+                device: Arc::clone(&self.device),
+                submission_index: self.info.submission_index(),
+                id: self.info.id.unwrap(),
+                label: self.info.label.clone(),
+            }))
+        };
+
         let mut pending_writes = device.pending_writes.lock();
         let pending_writes = pending_writes.as_mut().unwrap();
         if pending_writes.dst_buffers.contains_key(&buffer_id) {
@@ -607,6 +618,38 @@ impl<A: HalApi> Resource<BufferId> for Buffer<A> {
     }
 }
 
+/// A buffer that has been marked as destroyed and is staged for actual deletion soon.
+#[derive(Debug)]
+pub struct DestroyedBuffer<A: HalApi> {
+    raw: Option<A::Buffer>,
+    device: Arc<Device<A>>,
+    label: String,
+    pub(crate) id: BufferId,
+    pub(crate) submission_index: u64,
+}
+
+impl<A: HalApi> DestroyedBuffer<A> {
+    pub fn label(&self) -> &dyn Debug {
+        if !self.label.is_empty() {
+            return &self.label;
+        }
+
+        &self.id
+    }
+}
+
+impl<A: HalApi> Drop for DestroyedBuffer<A> {
+    fn drop(&mut self) {
+        if let Some(raw) = self.raw.take() {
+            resource_log!("Deallocate raw Buffer (destroyed) {:?}", self.label());
+            unsafe {
+                use hal::Device;
+                self.device.raw().destroy_buffer(raw);
+            }
+        }
+    }
+}
+
 /// A temporary buffer, consumed by the command that uses it.
 ///
 /// A [`StagingBuffer`] is designed for one-shot uploads of data to the GPU. It
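
Note on the pattern used by the second commit: Buffer::destroy now takes ("snatches") the raw hal buffer out of its snatchable slot under a write guard, wraps it in a DestroyedBuffer, and parks that on the submission-tracking queues so the raw handle is only freed once the GPU is done with it, while the user-visible Buffer handle can stay alive and later calls report AlreadyDestroyed. The sketch below is illustrative only and is not wgpu-core code; every name in it (RawBuffer, FakeDevice, SnatchableBuffer, DestroyedBuffer, destroy) is a hypothetical stand-in, using only the standard library to show the snatch-and-defer idea.

// Illustrative sketch only -- not wgpu-core code. All names are hypothetical.
use std::sync::{Arc, Mutex};

/// Stand-in for a backend buffer handle (e.g. a Vulkan or Metal object).
#[derive(Debug)]
struct RawBuffer(u32);

struct FakeDevice;

impl FakeDevice {
    fn destroy_buffer(&self, raw: RawBuffer) {
        println!("deallocating raw buffer {:?}", raw);
    }
}

/// Public handle: the raw buffer sits in a "snatchable" slot so destroy()
/// can take it out while other Arc clones of the handle remain alive.
struct SnatchableBuffer {
    raw: Mutex<Option<RawBuffer>>,
    device: Arc<FakeDevice>,
}

/// What destroy() produces: keeps the raw handle (and the device) alive
/// until the deferred-destruction queue decides the GPU is done with it.
struct DestroyedBuffer {
    raw: Option<RawBuffer>,
    device: Arc<FakeDevice>,
}

impl Drop for DestroyedBuffer {
    fn drop(&mut self) {
        if let Some(raw) = self.raw.take() {
            self.device.destroy_buffer(raw);
        }
    }
}

impl SnatchableBuffer {
    /// Snatch the raw handle; later calls see None and report "already destroyed".
    fn destroy(&self) -> Result<DestroyedBuffer, &'static str> {
        let raw = self.raw.lock().unwrap().take().ok_or("already destroyed")?;
        Ok(DestroyedBuffer {
            raw: Some(raw),
            device: Arc::clone(&self.device),
        })
    }
}

fn main() {
    let buffer = Arc::new(SnatchableBuffer {
        raw: Mutex::new(Some(RawBuffer(7))),
        device: Arc::new(FakeDevice),
    });

    // destroy() detaches the raw handle; pretend we park it on a submission queue.
    let pending = buffer.destroy().unwrap();
    assert!(buffer.destroy().is_err()); // second destroy: AlreadyDestroyed-like error

    // When the "queue" drops it (GPU work finished), the raw handle is freed.
    drop(pending);
}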