From 26a5499ac5a24b5fbfb36b1716157a4985881878 Mon Sep 17 00:00:00 2001
From: JMS55 <47158642+JMS55@users.noreply.github.com>
Date: Wed, 29 Nov 2023 23:53:33 -0500
Subject: [PATCH 01/22] WIP: Compile pipelines on the AsyncComputeTaskPool

---
 .../src/render_resource/pipeline_cache.rs | 93 +++++++++++--------
 crates/bevy_utils/src/futures.rs          | 10 +-
 2 files changed, 62 insertions(+), 41 deletions(-)

diff --git a/crates/bevy_render/src/render_resource/pipeline_cache.rs b/crates/bevy_render/src/render_resource/pipeline_cache.rs
index 3eb6f8ae43ef9..ac32b48c275b0 100644
--- a/crates/bevy_render/src/render_resource/pipeline_cache.rs
+++ b/crates/bevy_render/src/render_resource/pipeline_cache.rs
@@ -10,6 +10,7 @@ use crate::{
 use bevy_asset::{AssetEvent, AssetId, Assets};
 use bevy_ecs::system::{Res, ResMut};
 use bevy_ecs::{event::EventReader, system::Resource};
+use bevy_tasks::{AsyncComputeTaskPool, Task};
 use bevy_utils::{
     default,
     tracing::{debug, error},
@@ -20,7 +21,7 @@ use std::{
     borrow::Cow,
     hash::Hash,
     mem,
-    ops::Deref,
+    ops::{ControlFlow, Deref},
     sync::{Mutex, PoisonError},
 };
 use thiserror::Error;
@@ -94,6 +95,8 @@ pub struct CachedPipeline {
 pub enum CachedPipelineState {
     /// The pipeline GPU object is queued for creation.
     Queued,
+    /// The pipeline GPU object is being created.
+    Creating(Task<Pipeline>),
     /// The pipeline GPU object was created successfully and is available (allocated on the GPU).
     Ok(Pipeline),
     /// An error occurred while trying to create the pipeline GPU object.
@@ -117,6 +120,9 @@ impl CachedPipelineState {
             CachedPipelineState::Queued => {
                 panic!("Pipeline has not been compiled yet. It is still in the 'Queued' state.")
             }
+            CachedPipelineState::Creating(..) => {
+                panic!("Pipeline has not been compiled yet. It is still in the 'Creating' state.")
+            }
             CachedPipelineState::Err(err) => panic!("{}", err),
         }
     }
@@ -464,7 +470,7 @@ impl LayoutCache {
 /// pipeline object is deferred to the [`RenderSet::Render`] step, just before the render
 /// graph starts being processed, as this requires access to the GPU.
 ///
-/// Note that the cache do not perform automatic deduplication of identical pipelines. It is
+/// Note that the cache does not perform automatic deduplication of identical pipelines. It is
 /// up to the user not to insert the same pipeline twice to avoid wasting GPU resources.
/// /// [`RenderSet::Render`]: crate::RenderSet::Render @@ -649,7 +655,7 @@ impl PipelineCache { } } - fn process_render_pipeline( + fn try_start_create_render_pipeline( &mut self, id: CachedPipelineId, descriptor: &RenderPipelineDescriptor, @@ -729,12 +735,13 @@ impl PipelineCache { }), }; - let pipeline = self.device.create_render_pipeline(&descriptor); - - CachedPipelineState::Ok(Pipeline::RenderPipeline(pipeline)) + let device = self.device.clone(); + CachedPipelineState::Creating(AsyncComputeTaskPool::get().spawn(async move { + Pipeline::RenderPipeline(device.create_render_pipeline(&descriptor)) + })) } - fn process_compute_pipeline( + fn try_start_create_compute_pipeline( &mut self, id: CachedPipelineId, descriptor: &ComputePipelineDescriptor, @@ -768,9 +775,10 @@ impl PipelineCache { entry_point: descriptor.entry_point.as_ref(), }; - let pipeline = self.device.create_compute_pipeline(&descriptor); - - CachedPipelineState::Ok(Pipeline::ComputePipeline(pipeline)) + let device = self.device.clone(); + CachedPipelineState::Creating(AsyncComputeTaskPool::get().spawn(async move { + Pipeline::ComputePipeline(device.create_compute_pipeline(&descriptor)) + })) } /// Process the pipeline queue and create all pending pipelines if possible. @@ -796,42 +804,49 @@ impl PipelineCache { } for id in waiting_pipelines { - let pipeline = &mut pipelines[id]; - if matches!(pipeline.state, CachedPipelineState::Ok(_)) { - continue; - } + self.process_pipeline(&mut pipelines[id], id); + } - pipeline.state = match &pipeline.descriptor { - PipelineDescriptor::RenderPipelineDescriptor(descriptor) => { - self.process_render_pipeline(id, descriptor) - } - PipelineDescriptor::ComputePipelineDescriptor(descriptor) => { - self.process_compute_pipeline(id, descriptor) - } - }; + self.pipelines = pipelines; + } - if let CachedPipelineState::Err(err) = &pipeline.state { - match err { - PipelineCacheError::ShaderNotLoaded(_) - | PipelineCacheError::ShaderImportNotYetAvailable => { - // retry - self.waiting_pipelines.insert(id); - } - // shader could not be processed ... retrying won't help - PipelineCacheError::ProcessShaderError(err) => { - let error_detail = err.emit_to_string(&self.shader_cache.composer); - error!("failed to process shader:\n{}", error_detail); - continue; + fn process_pipeline(&mut self, cached_pipeline: &mut CachedPipeline, id: usize) { + match &mut cached_pipeline.state { + CachedPipelineState::Queued => { + cached_pipeline.state = match &cached_pipeline.descriptor { + PipelineDescriptor::RenderPipelineDescriptor(descriptor) => { + self.try_start_create_render_pipeline(id, descriptor) } - PipelineCacheError::CreateShaderModule(description) => { - error!("failed to create shader module: {}", description); - continue; + PipelineDescriptor::ComputePipelineDescriptor(descriptor) => { + self.try_start_create_compute_pipeline(id, descriptor) } + }; + } + + CachedPipelineState::Creating(ref mut task) => { + if let Some(pipeline) = bevy_utils::futures::check_ready(task) { + cached_pipeline.state = CachedPipelineState::Ok(pipeline); } } - } - self.pipelines = pipelines; + CachedPipelineState::Err(err) => match err { + PipelineCacheError::ShaderNotLoaded(_) + | PipelineCacheError::ShaderImportNotYetAvailable => { + // Retry + self.waiting_pipelines.insert(id); + } + // Shader could not be processed ... 
retrying won't help + PipelineCacheError::ProcessShaderError(err) => { + let error_detail = err.emit_to_string(&self.shader_cache.composer); + error!("failed to process shader:\n{}", error_detail); + } + PipelineCacheError::CreateShaderModule(description) => { + error!("failed to create shader module: {}", description); + } + }, + + CachedPipelineState::Ok(_) => unreachable!(), + } } pub(crate) fn process_pipeline_queue_system(mut cache: ResMut) { diff --git a/crates/bevy_utils/src/futures.rs b/crates/bevy_utils/src/futures.rs index 07ce365be9bfe..ca56a260cb54e 100644 --- a/crates/bevy_utils/src/futures.rs +++ b/crates/bevy_utils/src/futures.rs @@ -5,16 +5,22 @@ use std::{ task::{Context, Poll, RawWaker, RawWakerVTable, Waker}, }; -/// Consumes the future, polls it once, and immediately returns the output +/// Consumes a future, polls it once, and immediately returns the output /// or returns `None` if it wasn't ready yet. /// /// This will cancel the future if it's not ready. pub fn now_or_never(mut future: F) -> Option { + check_ready(&mut future) +} + +/// Polls a future once, and returns the output if ready +/// or returns `None` if it wasn't ready yet. +pub fn check_ready(future: &mut F) -> Option { let noop_waker = noop_waker(); let mut cx = Context::from_waker(&noop_waker); // SAFETY: `future` is not moved and the original value is shadowed - let future = unsafe { Pin::new_unchecked(&mut future) }; + let future = unsafe { Pin::new_unchecked(future) }; match future.poll(&mut cx) { Poll::Ready(x) => Some(x), From a251c3a58db8f5a8c67e268baf5118837fb93c4f Mon Sep 17 00:00:00 2001 From: JMS55 <47158642+JMS55@users.noreply.github.com> Date: Thu, 30 Nov 2023 00:02:04 -0500 Subject: [PATCH 02/22] Remove unused import --- crates/bevy_render/src/render_resource/pipeline_cache.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/bevy_render/src/render_resource/pipeline_cache.rs b/crates/bevy_render/src/render_resource/pipeline_cache.rs index ac32b48c275b0..ee53ebdd1bd9c 100644 --- a/crates/bevy_render/src/render_resource/pipeline_cache.rs +++ b/crates/bevy_render/src/render_resource/pipeline_cache.rs @@ -21,7 +21,7 @@ use std::{ borrow::Cow, hash::Hash, mem, - ops::{ControlFlow, Deref}, + ops::Deref, sync::{Mutex, PoisonError}, }; use thiserror::Error; From 21aed5f04c67b07e67b18524b2c64643851ba839 Mon Sep 17 00:00:00 2001 From: JMS55 <47158642+JMS55@users.noreply.github.com> Date: Thu, 30 Nov 2023 20:58:24 -0500 Subject: [PATCH 03/22] More WIP --- .../src/render_resource/pipeline_cache.rs | 75 +++++++++---------- 1 file changed, 37 insertions(+), 38 deletions(-) diff --git a/crates/bevy_render/src/render_resource/pipeline_cache.rs b/crates/bevy_render/src/render_resource/pipeline_cache.rs index ee53ebdd1bd9c..375e9d7b6d883 100644 --- a/crates/bevy_render/src/render_resource/pipeline_cache.rs +++ b/crates/bevy_render/src/render_resource/pipeline_cache.rs @@ -1,12 +1,4 @@ -use crate::{ - render_resource::{ - BindGroupLayout, BindGroupLayoutId, ComputePipeline, ComputePipelineDescriptor, - RawComputePipelineDescriptor, RawFragmentState, RawRenderPipelineDescriptor, - RawVertexState, RenderPipeline, RenderPipelineDescriptor, Shader, ShaderImport, Source, - }, - renderer::RenderDevice, - Extract, -}; +use crate::{render_resource::*, renderer::RenderDevice, Extract}; use bevy_asset::{AssetEvent, AssetId, Assets}; use bevy_ecs::system::{Res, ResMut}; use bevy_ecs::{event::EventReader, system::Resource}; @@ -442,7 +434,7 @@ impl LayoutCache { render_device: &RenderDevice, 
bind_group_layouts: &[BindGroupLayout], push_constant_ranges: Vec, - ) -> &wgpu::PipelineLayout { + ) -> ErasedPipelineLayout { let bind_group_ids = bind_group_layouts.iter().map(|l| l.id()).collect(); self.layouts .entry((bind_group_ids, push_constant_ranges)) @@ -459,6 +451,7 @@ impl LayoutCache { }, )) }) + .clone() } } @@ -714,29 +707,34 @@ impl PipelineCache { )) }; - let descriptor = RawRenderPipelineDescriptor { - multiview: None, - depth_stencil: descriptor.depth_stencil.clone(), - label: descriptor.label.as_deref(), - layout, - multisample: descriptor.multisample, - primitive: descriptor.primitive, - vertex: RawVertexState { - buffers: &vertex_buffer_layouts, - entry_point: descriptor.vertex.entry_point.deref(), - module: &vertex_module, - }, - fragment: fragment_data - .as_ref() - .map(|(module, entry_point, targets)| RawFragmentState { - entry_point, - module, - targets, - }), - }; - let device = self.device.clone(); + let depth_stencil = descriptor.depth_stencil.clone(); + let label = descriptor.label.to_owned(); + let multisample = descriptor.multisample; + let primitive = descriptor.primitive; + let vertex_entry_point = descriptor.vertex.entry_point.to_owned(); + let fragment_data = fragment_data.map(|(module, entry_point, targets)| { + (module, entry_point.to_owned(), targets.to_owned()) + }); CachedPipelineState::Creating(AsyncComputeTaskPool::get().spawn(async move { + let descriptor = RawRenderPipelineDescriptor { + multiview: None, + depth_stencil, + label: label.as_deref(), + layout: layout.as_deref(), + multisample, + primitive, + vertex: RawVertexState { + buffers: &vertex_buffer_layouts, + entry_point: vertex_entry_point.deref(), + module: &vertex_module, + }, + fragment: fragment_data.map(|(module, entry_point, targets)| RawFragmentState { + module: &module, + entry_point: &entry_point, + targets: &targets, + }), + }; Pipeline::RenderPipeline(device.create_render_pipeline(&descriptor)) })) } @@ -768,15 +766,16 @@ impl PipelineCache { )) }; - let descriptor = RawComputePipelineDescriptor { - label: descriptor.label.as_deref(), - layout, - module: &compute_module, - entry_point: descriptor.entry_point.as_ref(), - }; - let device = self.device.clone(); + let label = descriptor.label.to_owned(); + let entry_point = descriptor.entry_point.to_owned(); CachedPipelineState::Creating(AsyncComputeTaskPool::get().spawn(async move { + let descriptor = RawComputePipelineDescriptor { + label: label.as_deref(), + layout: layout.as_deref(), + module: &compute_module, + entry_point: &entry_point, + }; Pipeline::ComputePipeline(device.create_compute_pipeline(&descriptor)) })) } From ca752d962b3686b074d0075b191aa0980a38ba46 Mon Sep 17 00:00:00 2001 From: JMS55 <47158642+JMS55@users.noreply.github.com> Date: Fri, 1 Dec 2023 12:20:12 -0500 Subject: [PATCH 04/22] More WIP --- .../src/render_resource/pipeline_cache.rs | 44 ++++++++----------- 1 file changed, 18 insertions(+), 26 deletions(-) diff --git a/crates/bevy_render/src/render_resource/pipeline_cache.rs b/crates/bevy_render/src/render_resource/pipeline_cache.rs index 375e9d7b6d883..c18c728112813 100644 --- a/crates/bevy_render/src/render_resource/pipeline_cache.rs +++ b/crates/bevy_render/src/render_resource/pipeline_cache.rs @@ -651,7 +651,7 @@ impl PipelineCache { fn try_start_create_render_pipeline( &mut self, id: CachedPipelineId, - descriptor: &RenderPipelineDescriptor, + descriptor: RenderPipelineDescriptor, ) -> CachedPipelineState { let vertex_module = match self.shader_cache.get( &self.device, @@ -708,32 +708,26 @@ impl 
PipelineCache { }; let device = self.device.clone(); - let depth_stencil = descriptor.depth_stencil.clone(); - let label = descriptor.label.to_owned(); - let multisample = descriptor.multisample; - let primitive = descriptor.primitive; - let vertex_entry_point = descriptor.vertex.entry_point.to_owned(); - let fragment_data = fragment_data.map(|(module, entry_point, targets)| { - (module, entry_point.to_owned(), targets.to_owned()) - }); CachedPipelineState::Creating(AsyncComputeTaskPool::get().spawn(async move { let descriptor = RawRenderPipelineDescriptor { multiview: None, - depth_stencil, - label: label.as_deref(), + depth_stencil: descriptor.depth_stencil.clone(), + label: descriptor.label.as_deref(), layout: layout.as_deref(), - multisample, - primitive, + multisample: descriptor.multisample, + primitive: descriptor.primitive, vertex: RawVertexState { buffers: &vertex_buffer_layouts, - entry_point: vertex_entry_point.deref(), + entry_point: descriptor.vertex.entry_point.deref(), module: &vertex_module, }, - fragment: fragment_data.map(|(module, entry_point, targets)| RawFragmentState { - module: &module, - entry_point: &entry_point, - targets: &targets, - }), + fragment: fragment_data + .as_ref() + .map(|(module, entry_point, targets)| RawFragmentState { + entry_point, + module, + targets, + }), }; Pipeline::RenderPipeline(device.create_render_pipeline(&descriptor)) })) @@ -742,7 +736,7 @@ impl PipelineCache { fn try_start_create_compute_pipeline( &mut self, id: CachedPipelineId, - descriptor: &ComputePipelineDescriptor, + descriptor: ComputePipelineDescriptor, ) -> CachedPipelineState { let compute_module = match self.shader_cache.get( &self.device, @@ -767,14 +761,12 @@ impl PipelineCache { }; let device = self.device.clone(); - let label = descriptor.label.to_owned(); - let entry_point = descriptor.entry_point.to_owned(); CachedPipelineState::Creating(AsyncComputeTaskPool::get().spawn(async move { let descriptor = RawComputePipelineDescriptor { - label: label.as_deref(), + label: descriptor.label.as_deref(), layout: layout.as_deref(), module: &compute_module, - entry_point: &entry_point, + entry_point: &descriptor.entry_point, }; Pipeline::ComputePipeline(device.create_compute_pipeline(&descriptor)) })) @@ -814,10 +806,10 @@ impl PipelineCache { CachedPipelineState::Queued => { cached_pipeline.state = match &cached_pipeline.descriptor { PipelineDescriptor::RenderPipelineDescriptor(descriptor) => { - self.try_start_create_render_pipeline(id, descriptor) + self.try_start_create_render_pipeline(id, *descriptor.clone()) } PipelineDescriptor::ComputePipelineDescriptor(descriptor) => { - self.try_start_create_compute_pipeline(id, descriptor) + self.try_start_create_compute_pipeline(id, *descriptor.clone()) } }; } From 96b05944d8328aae9aaa1dfdc782ca92203680d3 Mon Sep 17 00:00:00 2001 From: JMS55 <47158642+JMS55@users.noreply.github.com> Date: Fri, 1 Dec 2023 14:45:13 -0500 Subject: [PATCH 05/22] Code compiles, but code quality sucks --- .../src/render_resource/pipeline_cache.rs | 50 +++++++++++-------- 1 file changed, 28 insertions(+), 22 deletions(-) diff --git a/crates/bevy_render/src/render_resource/pipeline_cache.rs b/crates/bevy_render/src/render_resource/pipeline_cache.rs index c18c728112813..8530de7117a3e 100644 --- a/crates/bevy_render/src/render_resource/pipeline_cache.rs +++ b/crates/bevy_render/src/render_resource/pipeline_cache.rs @@ -665,38 +665,21 @@ impl PipelineCache { } }; - let fragment_data = if let Some(fragment) = &descriptor.fragment { - let fragment_module = match 
self.shader_cache.get( + let fragment_module = match &descriptor.fragment { + Some(fragment) => match self.shader_cache.get( &self.device, id, fragment.shader.id(), &fragment.shader_defs, ) { - Ok(module) => module, + Ok(module) => Some(module), Err(err) => { return CachedPipelineState::Err(err); } - }; - Some(( - fragment_module, - fragment.entry_point.deref(), - fragment.targets.as_slice(), - )) - } else { - None + }, + None => None, }; - let vertex_buffer_layouts = descriptor - .vertex - .buffers - .iter() - .map(|layout| RawVertexBufferLayout { - array_stride: layout.array_stride, - attributes: &layout.attributes, - step_mode: layout.step_mode, - }) - .collect::>(); - let layout = if descriptor.layout.is_empty() && descriptor.push_constant_ranges.is_empty() { None } else { @@ -709,6 +692,27 @@ impl PipelineCache { let device = self.device.clone(); CachedPipelineState::Creating(AsyncComputeTaskPool::get().spawn(async move { + let vertex_buffer_layouts = descriptor + .vertex + .buffers + .iter() + .map(|layout| RawVertexBufferLayout { + array_stride: layout.array_stride, + attributes: &layout.attributes, + step_mode: layout.step_mode, + }) + .collect::>(); + + let fragment_data = if let Some(fragment) = &descriptor.fragment { + Some(( + fragment_module.unwrap(), + fragment.entry_point.deref(), + fragment.targets.as_slice(), + )) + } else { + None + }; + let descriptor = RawRenderPipelineDescriptor { multiview: None, depth_stencil: descriptor.depth_stencil.clone(), @@ -729,6 +733,7 @@ impl PipelineCache { targets, }), }; + Pipeline::RenderPipeline(device.create_render_pipeline(&descriptor)) })) } @@ -768,6 +773,7 @@ impl PipelineCache { module: &compute_module, entry_point: &descriptor.entry_point, }; + Pipeline::ComputePipeline(device.create_compute_pipeline(&descriptor)) })) } From b6599d3e248cf9dbcb759238e2babe115dcf0c20 Mon Sep 17 00:00:00 2001 From: JMS55 <47158642+JMS55@users.noreply.github.com> Date: Fri, 1 Dec 2023 14:50:31 -0500 Subject: [PATCH 06/22] Bugfix --- .../src/render_resource/pipeline_cache.rs | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/crates/bevy_render/src/render_resource/pipeline_cache.rs b/crates/bevy_render/src/render_resource/pipeline_cache.rs index 8530de7117a3e..d7e84d34b8f58 100644 --- a/crates/bevy_render/src/render_resource/pipeline_cache.rs +++ b/crates/bevy_render/src/render_resource/pipeline_cache.rs @@ -823,27 +823,32 @@ impl PipelineCache { CachedPipelineState::Creating(ref mut task) => { if let Some(pipeline) = bevy_utils::futures::check_ready(task) { cached_pipeline.state = CachedPipelineState::Ok(pipeline); + return; } } CachedPipelineState::Err(err) => match err { + // Retry PipelineCacheError::ShaderNotLoaded(_) - | PipelineCacheError::ShaderImportNotYetAvailable => { - // Retry - self.waiting_pipelines.insert(id); - } + | PipelineCacheError::ShaderImportNotYetAvailable => {} + // Shader could not be processed ... 
retrying won't help PipelineCacheError::ProcessShaderError(err) => { let error_detail = err.emit_to_string(&self.shader_cache.composer); error!("failed to process shader:\n{}", error_detail); + return; } PipelineCacheError::CreateShaderModule(description) => { error!("failed to create shader module: {}", description); + return; } }, CachedPipelineState::Ok(_) => unreachable!(), } + + // Retry + self.waiting_pipelines.insert(id); } pub(crate) fn process_pipeline_queue_system(mut cache: ResMut) { From ed763e66af5ba377a6390b83794e63aa6ee3e6dd Mon Sep 17 00:00:00 2001 From: JMS55 <47158642+JMS55@users.noreply.github.com> Date: Fri, 26 Jan 2024 18:53:24 -0800 Subject: [PATCH 07/22] Clippy --- .../bevy_render/src/render_resource/pipeline_cache.rs | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/crates/bevy_render/src/render_resource/pipeline_cache.rs b/crates/bevy_render/src/render_resource/pipeline_cache.rs index d6595f4ce1e4b..a6961303b2cf7 100644 --- a/crates/bevy_render/src/render_resource/pipeline_cache.rs +++ b/crates/bevy_render/src/render_resource/pipeline_cache.rs @@ -707,15 +707,13 @@ impl PipelineCache { }) .collect::>(); - let fragment_data = if let Some(fragment) = &descriptor.fragment { - Some(( + let fragment_data = descriptor.fragment.as_ref().map(|fragment| { + ( fragment_module.unwrap(), fragment.entry_point.deref(), fragment.targets.as_slice(), - )) - } else { - None - }; + ) + }); let descriptor = RawRenderPipelineDescriptor { multiview: None, From 70db708b29f62f59a0e56307448e0c4bd238f76c Mon Sep 17 00:00:00 2001 From: JMS55 <47158642+JMS55@users.noreply.github.com> Date: Fri, 26 Jan 2024 18:57:40 -0800 Subject: [PATCH 08/22] Fix crashes for pipelines --- .../src/contrast_adaptive_sharpening/node.rs | 4 +++- crates/bevy_core_pipeline/src/fxaa/node.rs | 6 +++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/crates/bevy_core_pipeline/src/contrast_adaptive_sharpening/node.rs b/crates/bevy_core_pipeline/src/contrast_adaptive_sharpening/node.rs index ee0b4672b9226..e75349717d010 100644 --- a/crates/bevy_core_pipeline/src/contrast_adaptive_sharpening/node.rs +++ b/crates/bevy_core_pipeline/src/contrast_adaptive_sharpening/node.rs @@ -63,7 +63,9 @@ impl Node for CASNode { return Ok(()); }; - let pipeline = pipeline_cache.get_render_pipeline(pipeline.0).unwrap(); + let Some(pipeline) = pipeline_cache.get_render_pipeline(pipeline.0) else { + return Ok(()); + }; let view_target = target.post_process_write(); let source = view_target.source; diff --git a/crates/bevy_core_pipeline/src/fxaa/node.rs b/crates/bevy_core_pipeline/src/fxaa/node.rs index 1a575b8338035..857a3015c181b 100644 --- a/crates/bevy_core_pipeline/src/fxaa/node.rs +++ b/crates/bevy_core_pipeline/src/fxaa/node.rs @@ -39,9 +39,9 @@ impl ViewNode for FxaaNode { return Ok(()); }; - let pipeline = pipeline_cache - .get_render_pipeline(pipeline.pipeline_id) - .unwrap(); + let Some(pipeline) = pipeline_cache.get_render_pipeline(pipeline.pipeline_id) else { + return Ok(()); + }; let post_process = target.post_process_write(); let source = post_process.source; From ca64760d0346c6bb9d9dc98d84679bcf8792de64 Mon Sep 17 00:00:00 2001 From: JMS55 <47158642+JMS55@users.noreply.github.com> Date: Fri, 26 Jan 2024 19:08:41 -0800 Subject: [PATCH 09/22] Add workaround for the present_frames error log --- crates/bevy_render/src/camera/camera_driver_node.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/crates/bevy_render/src/camera/camera_driver_node.rs 
b/crates/bevy_render/src/camera/camera_driver_node.rs index 9f24b72fde862..81d32581f7567 100644 --- a/crates/bevy_render/src/camera/camera_driver_node.rs +++ b/crates/bevy_render/src/camera/camera_driver_node.rs @@ -4,6 +4,7 @@ use crate::{ renderer::RenderContext, view::ExtractedWindows, }; +use bevy_core::FrameCount; use bevy_ecs::{prelude::QueryState, world::World}; use bevy_utils::HashSet; use wgpu::{LoadOp, Operations, RenderPassColorAttachment, RenderPassDescriptor, StoreOp}; @@ -57,10 +58,14 @@ impl Node for CameraDriverNode { } } + // For the first few frames, no pipelines have finished compiling, and so no work will have been sent to the GPU, + // causing error messages on startup. Scheduling some dummy work bypasses this. + let first_few_frames = world.resource::().0 <= 5; + // wgpu (and some backends) require doing work for swap chains if you call `get_current_texture()` and `present()` // This ensures that Bevy doesn't crash, even when there are no cameras (and therefore no work submitted). for (id, window) in world.resource::().iter() { - if camera_windows.contains(id) { + if camera_windows.contains(id) && !first_few_frames { continue; } From 373dd5dc83d858556144f209c30e3cd68a0cdbaa Mon Sep 17 00:00:00 2001 From: JMS55 <47158642+JMS55@users.noreply.github.com> Date: Fri, 26 Jan 2024 20:00:08 -0800 Subject: [PATCH 10/22] Fix for non-multithreaded environments --- .../src/render_resource/pipeline_cache.rs | 23 ++++++++++++++----- 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/crates/bevy_render/src/render_resource/pipeline_cache.rs b/crates/bevy_render/src/render_resource/pipeline_cache.rs index a6961303b2cf7..d5c982f21b950 100644 --- a/crates/bevy_render/src/render_resource/pipeline_cache.rs +++ b/crates/bevy_render/src/render_resource/pipeline_cache.rs @@ -2,7 +2,7 @@ use crate::{render_resource::*, renderer::RenderDevice, Extract}; use bevy_asset::{AssetEvent, AssetId, Assets}; use bevy_ecs::system::{Res, ResMut}; use bevy_ecs::{event::EventReader, system::Resource}; -use bevy_tasks::{AsyncComputeTaskPool, Task}; +use bevy_tasks::Task; use bevy_utils::{ default, tracing::{debug, error}, @@ -11,6 +11,7 @@ use bevy_utils::{ use naga::valid::Capabilities; use std::{ borrow::Cow, + future::Future, hash::Hash, mem, ops::Deref, @@ -695,7 +696,7 @@ impl PipelineCache { }; let device = self.device.clone(); - CachedPipelineState::Creating(AsyncComputeTaskPool::get().spawn(async move { + create_pipeline_task(async move { let vertex_buffer_layouts = descriptor .vertex .buffers @@ -737,7 +738,7 @@ impl PipelineCache { }; Pipeline::RenderPipeline(device.create_render_pipeline(&descriptor)) - })) + }) } fn try_start_create_compute_pipeline( @@ -768,7 +769,7 @@ impl PipelineCache { }; let device = self.device.clone(); - CachedPipelineState::Creating(AsyncComputeTaskPool::get().spawn(async move { + create_pipeline_task(async move { let descriptor = RawComputePipelineDescriptor { label: descriptor.label.as_deref(), layout: layout.as_deref(), @@ -777,7 +778,7 @@ impl PipelineCache { }; Pipeline::ComputePipeline(device.create_compute_pipeline(&descriptor)) - })) + }) } /// Process the pipeline queue and create all pending pipelines if possible. 
@@ -846,7 +847,7 @@ impl PipelineCache { } }, - CachedPipelineState::Ok(_) => unreachable!(), + CachedPipelineState::Ok(_) => {} } // Retry @@ -880,6 +881,16 @@ impl PipelineCache { } } +fn create_pipeline_task( + task: impl Future + Send + 'static, +) -> CachedPipelineState { + #[cfg(all(not(target_arch = "wasm32"), feature = "multi-threaded"))] + return CachedPipelineState::Creating(bevy_tasks::AsyncComputeTaskPool::get().spawn(task)); + + #[cfg(any(target_arch = "wasm32", not(feature = "multi-threaded")))] + CachedPipelineState::Ok(futures_lite::future::block_on(task)) +} + /// Type of error returned by a [`PipelineCache`] when the creation of a GPU pipeline object failed. #[derive(Error, Debug)] pub enum PipelineCacheError { From 39baba77e8be430617e2fbd884947cca2ec67962 Mon Sep 17 00:00:00 2001 From: JMS55 <47158642+JMS55@users.noreply.github.com> Date: Fri, 26 Jan 2024 20:14:58 -0800 Subject: [PATCH 11/22] Add bevy_render/multi-threaded feature --- crates/bevy_internal/Cargo.toml | 1 + crates/bevy_render/Cargo.toml | 1 + 2 files changed, 2 insertions(+) diff --git a/crates/bevy_internal/Cargo.toml b/crates/bevy_internal/Cargo.toml index 1ba3a2b7e51ff..609312ce286b3 100644 --- a/crates/bevy_internal/Cargo.toml +++ b/crates/bevy_internal/Cargo.toml @@ -77,6 +77,7 @@ serialize = [ multi-threaded = [ "bevy_asset/multi-threaded", "bevy_ecs/multi-threaded", + "bevy_render/multi-threaded", "bevy_tasks/multi-threaded", ] async-io = ["bevy_tasks/async-io"] diff --git a/crates/bevy_render/Cargo.toml b/crates/bevy_render/Cargo.toml index f057470fb9578..e255103c97cd9 100644 --- a/crates/bevy_render/Cargo.toml +++ b/crates/bevy_render/Cargo.toml @@ -18,6 +18,7 @@ bmp = ["image/bmp"] webp = ["image/webp"] dds = ["ddsfile"] pnm = ["image/pnm"] +multi-threaded = ["bevy_tasks/multi-threaded"] bevy_ci_testing = ["bevy_app/bevy_ci_testing"] shader_format_glsl = ["naga/glsl-in", "naga/wgsl-out", "naga_oil/glsl"] From 87b8798f7a9fb81ff5fe3abe4729db8de206dd53 Mon Sep 17 00:00:00 2001 From: JMS55 <47158642+JMS55@users.noreply.github.com> Date: Fri, 26 Jan 2024 20:21:36 -0800 Subject: [PATCH 12/22] Increase frame count hack --- crates/bevy_render/src/camera/camera_driver_node.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/bevy_render/src/camera/camera_driver_node.rs b/crates/bevy_render/src/camera/camera_driver_node.rs index 81d32581f7567..4f5dfab8ac9fb 100644 --- a/crates/bevy_render/src/camera/camera_driver_node.rs +++ b/crates/bevy_render/src/camera/camera_driver_node.rs @@ -58,14 +58,14 @@ impl Node for CameraDriverNode { } } - // For the first few frames, no pipelines have finished compiling, and so no work will have been sent to the GPU, + // For the first several frames, no pipelines have finished compiling, and so no work will have been sent to the GPU, // causing error messages on startup. Scheduling some dummy work bypasses this. - let first_few_frames = world.resource::().0 <= 5; + let first_several_frames = world.resource::().0 <= 100; // wgpu (and some backends) require doing work for swap chains if you call `get_current_texture()` and `present()` // This ensures that Bevy doesn't crash, even when there are no cameras (and therefore no work submitted). 
for (id, window) in world.resource::().iter() { - if camera_windows.contains(id) && !first_few_frames { + if camera_windows.contains(id) && !first_several_frames { continue; } From aff28cf85524a758870834febf432c3538440a43 Mon Sep 17 00:00:00 2001 From: JMS55 <47158642+JMS55@users.noreply.github.com> Date: Fri, 26 Jan 2024 21:48:54 -0800 Subject: [PATCH 13/22] Fix bug --- .../bevy_render/src/render_resource/pipeline_cache.rs | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/crates/bevy_render/src/render_resource/pipeline_cache.rs b/crates/bevy_render/src/render_resource/pipeline_cache.rs index d5c982f21b950..3e637163df088 100644 --- a/crates/bevy_render/src/render_resource/pipeline_cache.rs +++ b/crates/bevy_render/src/render_resource/pipeline_cache.rs @@ -821,37 +821,34 @@ impl PipelineCache { self.try_start_create_compute_pipeline(id, *descriptor.clone()) } }; + self.waiting_pipelines.insert(id); } CachedPipelineState::Creating(ref mut task) => { if let Some(pipeline) = bevy_utils::futures::check_ready(task) { cached_pipeline.state = CachedPipelineState::Ok(pipeline); - return; } } CachedPipelineState::Err(err) => match err { // Retry PipelineCacheError::ShaderNotLoaded(_) - | PipelineCacheError::ShaderImportNotYetAvailable => {} + | PipelineCacheError::ShaderImportNotYetAvailable => { + self.waiting_pipelines.insert(id); + } // Shader could not be processed ... retrying won't help PipelineCacheError::ProcessShaderError(err) => { let error_detail = err.emit_to_string(&self.shader_cache.composer); error!("failed to process shader:\n{}", error_detail); - return; } PipelineCacheError::CreateShaderModule(description) => { error!("failed to create shader module: {}", description); - return; } }, CachedPipelineState::Ok(_) => {} } - - // Retry - self.waiting_pipelines.insert(id); } pub(crate) fn process_pipeline_queue_system(mut cache: ResMut) { From a54a0d396a036efd3b896b7136982fdad2affeab Mon Sep 17 00:00:00 2001 From: JMS55 <47158642+JMS55@users.noreply.github.com> Date: Sun, 28 Jan 2024 16:50:23 -0800 Subject: [PATCH 14/22] Revert "Fix bug" This reverts commit aff28cf85524a758870834febf432c3538440a43. --- .../bevy_render/src/render_resource/pipeline_cache.rs | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/crates/bevy_render/src/render_resource/pipeline_cache.rs b/crates/bevy_render/src/render_resource/pipeline_cache.rs index 3e637163df088..d5c982f21b950 100644 --- a/crates/bevy_render/src/render_resource/pipeline_cache.rs +++ b/crates/bevy_render/src/render_resource/pipeline_cache.rs @@ -821,34 +821,37 @@ impl PipelineCache { self.try_start_create_compute_pipeline(id, *descriptor.clone()) } }; - self.waiting_pipelines.insert(id); } CachedPipelineState::Creating(ref mut task) => { if let Some(pipeline) = bevy_utils::futures::check_ready(task) { cached_pipeline.state = CachedPipelineState::Ok(pipeline); + return; } } CachedPipelineState::Err(err) => match err { // Retry PipelineCacheError::ShaderNotLoaded(_) - | PipelineCacheError::ShaderImportNotYetAvailable => { - self.waiting_pipelines.insert(id); - } + | PipelineCacheError::ShaderImportNotYetAvailable => {} // Shader could not be processed ... 
retrying won't help PipelineCacheError::ProcessShaderError(err) => { let error_detail = err.emit_to_string(&self.shader_cache.composer); error!("failed to process shader:\n{}", error_detail); + return; } PipelineCacheError::CreateShaderModule(description) => { error!("failed to create shader module: {}", description); + return; } }, CachedPipelineState::Ok(_) => {} } + + // Retry + self.waiting_pipelines.insert(id); } pub(crate) fn process_pipeline_queue_system(mut cache: ResMut) { From 4aed836e344b68305da5995f383ccc530cc81851 Mon Sep 17 00:00:00 2001 From: JMS55 <47158642+JMS55@users.noreply.github.com> Date: Sun, 28 Jan 2024 16:51:44 -0800 Subject: [PATCH 15/22] Fix --- crates/bevy_render/src/render_resource/pipeline_cache.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/bevy_render/src/render_resource/pipeline_cache.rs b/crates/bevy_render/src/render_resource/pipeline_cache.rs index d5c982f21b950..a6e7aef5a7493 100644 --- a/crates/bevy_render/src/render_resource/pipeline_cache.rs +++ b/crates/bevy_render/src/render_resource/pipeline_cache.rs @@ -847,7 +847,7 @@ impl PipelineCache { } }, - CachedPipelineState::Ok(_) => {} + CachedPipelineState::Ok(_) => return, } // Retry From 48270aeff4e58d85790708fedd575ac21a6707ff Mon Sep 17 00:00:00 2001 From: JMS55 <47158642+JMS55@users.noreply.github.com> Date: Sun, 28 Jan 2024 19:29:46 -0800 Subject: [PATCH 16/22] Remove macos async pipeline support --- .../src/render_resource/pipeline_cache.rs | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/crates/bevy_render/src/render_resource/pipeline_cache.rs b/crates/bevy_render/src/render_resource/pipeline_cache.rs index a6e7aef5a7493..868d263dc67c3 100644 --- a/crates/bevy_render/src/render_resource/pipeline_cache.rs +++ b/crates/bevy_render/src/render_resource/pipeline_cache.rs @@ -884,10 +884,18 @@ impl PipelineCache { fn create_pipeline_task( task: impl Future + Send + 'static, ) -> CachedPipelineState { - #[cfg(all(not(target_arch = "wasm32"), feature = "multi-threaded"))] + #[cfg(all( + not(target_arch = "wasm32"), + not(target_os = "macos"), + feature = "multi-threaded" + ))] return CachedPipelineState::Creating(bevy_tasks::AsyncComputeTaskPool::get().spawn(task)); - #[cfg(any(target_arch = "wasm32", not(feature = "multi-threaded")))] + #[cfg(any( + target_arch = "wasm32", + target_os = "macos", + not(feature = "multi-threaded") + ))] CachedPipelineState::Ok(futures_lite::future::block_on(task)) } From 488fc1158f4a137cf967d676c8e97c1858e90fb9 Mon Sep 17 00:00:00 2001 From: JMS55 <47158642+JMS55@users.noreply.github.com> Date: Wed, 31 Jan 2024 19:25:30 -0800 Subject: [PATCH 17/22] Move expensive work into the task --- .../src/render_resource/pipeline_cache.rs | 161 ++++++++++-------- 1 file changed, 88 insertions(+), 73 deletions(-) diff --git a/crates/bevy_render/src/render_resource/pipeline_cache.rs b/crates/bevy_render/src/render_resource/pipeline_cache.rs index 868d263dc67c3..31b2629d1a987 100644 --- a/crates/bevy_render/src/render_resource/pipeline_cache.rs +++ b/crates/bevy_render/src/render_resource/pipeline_cache.rs @@ -15,7 +15,7 @@ use std::{ hash::Hash, mem, ops::Deref, - sync::{Mutex, PoisonError}, + sync::{Arc, Mutex, PoisonError}, }; use thiserror::Error; #[cfg(feature = "shader_format_spirv")] @@ -89,7 +89,7 @@ pub enum CachedPipelineState { /// The pipeline GPU object is queued for creation. Queued, /// The pipeline GPU object is being created. 
- Creating(Task), + Creating(Task>), /// The pipeline GPU object was created successfully and is available (allocated on the GPU). Ok(Pipeline), /// An error occurred while trying to create the pipeline GPU object. @@ -474,8 +474,8 @@ impl LayoutCache { /// [`RenderSet::Render`]: crate::RenderSet::Render #[derive(Resource)] pub struct PipelineCache { - layout_cache: LayoutCache, - shader_cache: ShaderCache, + layout_cache: Arc>, + shader_cache: Arc>, device: RenderDevice, pipelines: Vec, waiting_pipelines: HashSet, @@ -490,7 +490,7 @@ impl PipelineCache { /// Create a new pipeline cache associated with the given render device. pub fn new(device: RenderDevice) -> Self { Self { - shader_cache: ShaderCache::new(&device), + shader_cache: Arc::new(Mutex::new(ShaderCache::new(&device))), device, layout_cache: default(), waiting_pipelines: default(), @@ -638,7 +638,8 @@ impl PipelineCache { } fn set_shader(&mut self, id: AssetId, shader: &Shader) { - let pipelines_to_queue = self.shader_cache.set_shader(id, shader.clone()); + let mut shader_cache = self.shader_cache.lock().unwrap(); + let pipelines_to_queue = shader_cache.set_shader(id, shader.clone()); for cached_pipeline in pipelines_to_queue { self.pipelines[cached_pipeline].state = CachedPipelineState::Queued; self.waiting_pipelines.insert(cached_pipeline); @@ -646,57 +647,58 @@ impl PipelineCache { } fn remove_shader(&mut self, shader: AssetId) { - let pipelines_to_queue = self.shader_cache.remove(shader); + let mut shader_cache = self.shader_cache.lock().unwrap(); + let pipelines_to_queue = shader_cache.remove(shader); for cached_pipeline in pipelines_to_queue { self.pipelines[cached_pipeline].state = CachedPipelineState::Queued; self.waiting_pipelines.insert(cached_pipeline); } } - fn try_start_create_render_pipeline( + fn start_create_render_pipeline( &mut self, id: CachedPipelineId, descriptor: RenderPipelineDescriptor, ) -> CachedPipelineState { - let vertex_module = match self.shader_cache.get( - &self.device, - id, - descriptor.vertex.shader.id(), - &descriptor.vertex.shader_defs, - ) { - Ok(module) => module, - Err(err) => { - return CachedPipelineState::Err(err); - } - }; + let device = self.device.clone(); + let shader_cache = self.shader_cache.clone(); + let layout_cache = self.layout_cache.clone(); + create_pipeline_task(async move { + let mut shader_cache = shader_cache.lock().unwrap(); + let mut layout_cache = layout_cache.lock().unwrap(); - let fragment_module = match &descriptor.fragment { - Some(fragment) => match self.shader_cache.get( - &self.device, + let vertex_module = match shader_cache.get( + &device, id, - fragment.shader.id(), - &fragment.shader_defs, + descriptor.vertex.shader.id(), + &descriptor.vertex.shader_defs, ) { - Ok(module) => Some(module), - Err(err) => { - return CachedPipelineState::Err(err); + Ok(module) => module, + Err(err) => return Err(err), + }; + + let fragment_module = match &descriptor.fragment { + Some(fragment) => { + match shader_cache.get(&device, id, fragment.shader.id(), &fragment.shader_defs) + { + Ok(module) => Some(module), + Err(err) => return Err(err), + } } - }, - None => None, - }; + None => None, + }; - let layout = if descriptor.layout.is_empty() && descriptor.push_constant_ranges.is_empty() { - None - } else { - Some(self.layout_cache.get( - &self.device, - &descriptor.layout, - descriptor.push_constant_ranges.to_vec(), - )) - }; + let layout = + if descriptor.layout.is_empty() && descriptor.push_constant_ranges.is_empty() { + None + } else { + Some(layout_cache.get( + &device, 
+ &descriptor.layout, + descriptor.push_constant_ranges.to_vec(), + )) + }; - let device = self.device.clone(); - create_pipeline_task(async move { let vertex_buffer_layouts = descriptor .vertex .buffers @@ -737,39 +739,45 @@ impl PipelineCache { }), }; - Pipeline::RenderPipeline(device.create_render_pipeline(&descriptor)) + Ok(Pipeline::RenderPipeline( + device.create_render_pipeline(&descriptor), + )) }) } - fn try_start_create_compute_pipeline( + fn start_create_compute_pipeline( &mut self, id: CachedPipelineId, descriptor: ComputePipelineDescriptor, ) -> CachedPipelineState { - let compute_module = match self.shader_cache.get( - &self.device, - id, - descriptor.shader.id(), - &descriptor.shader_defs, - ) { - Ok(module) => module, - Err(err) => { - return CachedPipelineState::Err(err); - } - }; - - let layout = if descriptor.layout.is_empty() && descriptor.push_constant_ranges.is_empty() { - None - } else { - Some(self.layout_cache.get( - &self.device, - &descriptor.layout, - descriptor.push_constant_ranges.to_vec(), - )) - }; - let device = self.device.clone(); + let shader_cache = self.shader_cache.clone(); + let layout_cache = self.layout_cache.clone(); create_pipeline_task(async move { + let mut shader_cache = shader_cache.lock().unwrap(); + let mut layout_cache = layout_cache.lock().unwrap(); + + let compute_module = match shader_cache.get( + &device, + id, + descriptor.shader.id(), + &descriptor.shader_defs, + ) { + Ok(module) => module, + Err(err) => return Err(err), + }; + + let layout = + if descriptor.layout.is_empty() && descriptor.push_constant_ranges.is_empty() { + None + } else { + Some(layout_cache.get( + &device, + &descriptor.layout, + descriptor.push_constant_ranges.to_vec(), + )) + }; + let descriptor = RawComputePipelineDescriptor { label: descriptor.label.as_deref(), layout: layout.as_deref(), @@ -777,7 +785,9 @@ impl PipelineCache { entry_point: &descriptor.entry_point, }; - Pipeline::ComputePipeline(device.create_compute_pipeline(&descriptor)) + Ok(Pipeline::ComputePipeline( + device.create_compute_pipeline(&descriptor), + )) }) } @@ -815,18 +825,22 @@ impl PipelineCache { CachedPipelineState::Queued => { cached_pipeline.state = match &cached_pipeline.descriptor { PipelineDescriptor::RenderPipelineDescriptor(descriptor) => { - self.try_start_create_render_pipeline(id, *descriptor.clone()) + self.start_create_render_pipeline(id, *descriptor.clone()) } PipelineDescriptor::ComputePipelineDescriptor(descriptor) => { - self.try_start_create_compute_pipeline(id, *descriptor.clone()) + self.start_create_compute_pipeline(id, *descriptor.clone()) } }; } CachedPipelineState::Creating(ref mut task) => { - if let Some(pipeline) = bevy_utils::futures::check_ready(task) { - cached_pipeline.state = CachedPipelineState::Ok(pipeline); - return; + match bevy_utils::futures::check_ready(task) { + Some(Ok(pipeline)) => { + cached_pipeline.state = CachedPipelineState::Ok(pipeline); + return; + } + Some(Err(err)) => cached_pipeline.state = CachedPipelineState::Err(err), + _ => (), } } @@ -837,7 +851,8 @@ impl PipelineCache { // Shader could not be processed ... 
retrying won't help PipelineCacheError::ProcessShaderError(err) => { - let error_detail = err.emit_to_string(&self.shader_cache.composer); + let error_detail = + err.emit_to_string(&self.shader_cache.lock().unwrap().composer); error!("failed to process shader:\n{}", error_detail); return; } @@ -882,7 +897,7 @@ impl PipelineCache { } fn create_pipeline_task( - task: impl Future + Send + 'static, + task: impl Future> + Send + 'static, ) -> CachedPipelineState { #[cfg(all( not(target_arch = "wasm32"), From e7a216ddec8298a4d51f593973e6608ea4307d69 Mon Sep 17 00:00:00 2001 From: JMS55 <47158642+JMS55@users.noreply.github.com> Date: Wed, 31 Jan 2024 19:32:55 -0800 Subject: [PATCH 18/22] Drop locks sooner --- crates/bevy_render/src/render_resource/pipeline_cache.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/crates/bevy_render/src/render_resource/pipeline_cache.rs b/crates/bevy_render/src/render_resource/pipeline_cache.rs index 31b2629d1a987..0bc26af7b0fd4 100644 --- a/crates/bevy_render/src/render_resource/pipeline_cache.rs +++ b/crates/bevy_render/src/render_resource/pipeline_cache.rs @@ -699,6 +699,8 @@ impl PipelineCache { )) }; + drop((shader_cache, layout_cache)); + let vertex_buffer_layouts = descriptor .vertex .buffers @@ -778,6 +780,8 @@ impl PipelineCache { )) }; + drop((shader_cache, layout_cache)); + let descriptor = RawComputePipelineDescriptor { label: descriptor.label.as_deref(), layout: layout.as_deref(), @@ -881,6 +885,7 @@ impl PipelineCache { for event in events.read() { #[allow(clippy::match_same_arms)] match event { + // PERF: Instead of blocking waiting for the shader cache lock, try again next frame if the lock is currently held AssetEvent::Added { id } | AssetEvent::Modified { id } => { if let Some(shader) = shaders.get(*id) { cache.set_shader(*id, shader); From 9557aa31757ca6fc95c5287e1e0447cc41a4f370 Mon Sep 17 00:00:00 2001 From: JMS55 <47158642+JMS55@users.noreply.github.com> Date: Thu, 1 Feb 2024 18:49:41 -0800 Subject: [PATCH 19/22] Fix compile error --- crates/bevy_render/src/render_resource/pipeline_cache.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/crates/bevy_render/src/render_resource/pipeline_cache.rs b/crates/bevy_render/src/render_resource/pipeline_cache.rs index 0bc26af7b0fd4..c4ec01b3944a1 100644 --- a/crates/bevy_render/src/render_resource/pipeline_cache.rs +++ b/crates/bevy_render/src/render_resource/pipeline_cache.rs @@ -916,7 +916,10 @@ fn create_pipeline_task( target_os = "macos", not(feature = "multi-threaded") ))] - CachedPipelineState::Ok(futures_lite::future::block_on(task)) + match futures_lite::future::block_on(task) { + Ok(pipeline) => CachedPipelineState::Ok(pipeline), + Err(err) => CachedPipelineState::Err(err), + } } /// Type of error returned by a [`PipelineCache`] when the creation of a GPU pipeline object failed. From 7b700b8668adabe65c67add487450362ffc24a03 Mon Sep 17 00:00:00 2001 From: JMS55 <47158642+JMS55@users.noreply.github.com> Date: Fri, 2 Feb 2024 11:11:42 -0800 Subject: [PATCH 20/22] Fix check_ready --- crates/bevy_utils/src/futures.rs | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/crates/bevy_utils/src/futures.rs b/crates/bevy_utils/src/futures.rs index ca56a260cb54e..ab425cee3a6ca 100644 --- a/crates/bevy_utils/src/futures.rs +++ b/crates/bevy_utils/src/futures.rs @@ -10,17 +10,25 @@ use std::{ /// /// This will cancel the future if it's not ready. 
pub fn now_or_never(mut future: F) -> Option { - check_ready(&mut future) + let noop_waker = noop_waker(); + let mut cx = Context::from_waker(&noop_waker); + + // SAFETY: `future` is not moved and the original value is shadowed + let future = unsafe { Pin::new_unchecked(&mut future) }; + + match future.poll(&mut cx) { + Poll::Ready(x) => Some(x), + _ => None, + } } /// Polls a future once, and returns the output if ready /// or returns `None` if it wasn't ready yet. -pub fn check_ready(future: &mut F) -> Option { +pub fn check_ready(future: &mut F) -> Option { let noop_waker = noop_waker(); let mut cx = Context::from_waker(&noop_waker); - // SAFETY: `future` is not moved and the original value is shadowed - let future = unsafe { Pin::new_unchecked(future) }; + let future = Pin::new(future); match future.poll(&mut cx) { Poll::Ready(x) => Some(x), From 27a9273c20abbabaf7cf0a546b711d56cefde738 Mon Sep 17 00:00:00 2001 From: JMS55 <47158642+JMS55@users.noreply.github.com> Date: Fri, 2 Feb 2024 11:30:01 -0800 Subject: [PATCH 21/22] Block waiting for the upscaling pipeline --- crates/bevy_core_pipeline/src/upscaling/mod.rs | 5 ++++- .../bevy_render/src/camera/camera_driver_node.rs | 7 +------ .../src/render_resource/pipeline_cache.rs | 16 ++++++++++++++++ 3 files changed, 21 insertions(+), 7 deletions(-) diff --git a/crates/bevy_core_pipeline/src/upscaling/mod.rs b/crates/bevy_core_pipeline/src/upscaling/mod.rs index 76475c8692bf5..a22db16013bc6 100644 --- a/crates/bevy_core_pipeline/src/upscaling/mod.rs +++ b/crates/bevy_core_pipeline/src/upscaling/mod.rs @@ -27,7 +27,7 @@ pub struct ViewUpscalingPipeline(CachedRenderPipelineId); fn prepare_view_upscaling_pipelines( mut commands: Commands, - pipeline_cache: Res, + mut pipeline_cache: ResMut, mut pipelines: ResMut>, blit_pipeline: Res, view_targets: Query<(Entity, &ViewTarget, Option<&ExtractedCamera>)>, @@ -49,6 +49,9 @@ fn prepare_view_upscaling_pipelines( }; let pipeline = pipelines.specialize(&pipeline_cache, &blit_pipeline, key); + // Ensure the pipeline is loaded before continuing the frame to prevent frames without any GPU work submitted + pipeline_cache.block_on_render_pipeline(pipeline); + commands .entity(entity) .insert(ViewUpscalingPipeline(pipeline)); diff --git a/crates/bevy_render/src/camera/camera_driver_node.rs b/crates/bevy_render/src/camera/camera_driver_node.rs index 9eb48dc145609..5845f216bb1bc 100644 --- a/crates/bevy_render/src/camera/camera_driver_node.rs +++ b/crates/bevy_render/src/camera/camera_driver_node.rs @@ -4,7 +4,6 @@ use crate::{ renderer::RenderContext, view::ExtractedWindows, }; -use bevy_core::FrameCount; use bevy_ecs::{prelude::QueryState, world::World}; use bevy_utils::HashSet; use wgpu::{LoadOp, Operations, RenderPassColorAttachment, RenderPassDescriptor, StoreOp}; @@ -54,14 +53,10 @@ impl Node for CameraDriverNode { } } - // For the first several frames, no pipelines have finished compiling, and so no work will have been sent to the GPU, - // causing error messages on startup. Scheduling some dummy work bypasses this. - let first_several_frames = world.resource::().0 <= 100; - // wgpu (and some backends) require doing work for swap chains if you call `get_current_texture()` and `present()` // This ensures that Bevy doesn't crash, even when there are no cameras (and therefore no work submitted). 
for (id, window) in world.resource::().iter() { - if camera_windows.contains(id) && !first_several_frames { + if camera_windows.contains(id) { continue; } diff --git a/crates/bevy_render/src/render_resource/pipeline_cache.rs b/crates/bevy_render/src/render_resource/pipeline_cache.rs index c4ec01b3944a1..8e45938b04401 100644 --- a/crates/bevy_render/src/render_resource/pipeline_cache.rs +++ b/crates/bevy_render/src/render_resource/pipeline_cache.rs @@ -561,6 +561,22 @@ impl PipelineCache { } } + /// Wait for a render pipeline to finish compiling. + #[inline] + pub fn block_on_render_pipeline(&mut self, id: CachedRenderPipelineId) { + if self.pipelines.len() <= id.0 { + self.process_queue(); + } + + let state = &mut self.pipelines[id.0].state; + if let CachedPipelineState::Creating(task) = state { + *state = match bevy_tasks::block_on(task) { + Ok(p) => CachedPipelineState::Ok(p), + Err(e) => CachedPipelineState::Err(e), + }; + } + } + /// Try to retrieve a compute pipeline GPU object from a cached ID. /// /// # Returns From a4cecb645b129f6a7302b1a4c65acf302a837a2d Mon Sep 17 00:00:00 2001 From: JMS55 <47158642+JMS55@users.noreply.github.com> Date: Fri, 2 Feb 2024 11:32:33 -0800 Subject: [PATCH 22/22] Update to naga_oil 0.13 --- crates/bevy_pbr/Cargo.toml | 4 ++-- crates/bevy_render/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/bevy_pbr/Cargo.toml b/crates/bevy_pbr/Cargo.toml index 93817b7aa9d3a..3c5584ecbeda4 100644 --- a/crates/bevy_pbr/Cargo.toml +++ b/crates/bevy_pbr/Cargo.toml @@ -40,11 +40,11 @@ smallvec = "1.6" thread_local = "1.0" [target.'cfg(target_arch = "wasm32")'.dependencies] -naga_oil = "0.12" +naga_oil = "0.13" [target.'cfg(not(target_arch = "wasm32"))'.dependencies] # Omit the `glsl` feature in non-WebAssembly by default. -naga_oil = { version = "0.12", default-features = false, features = [ +naga_oil = { version = "0.13", default-features = false, features = [ "test_shader", ] } diff --git a/crates/bevy_render/Cargo.toml b/crates/bevy_render/Cargo.toml index e255103c97cd9..ce679e4f73810 100644 --- a/crates/bevy_render/Cargo.toml +++ b/crates/bevy_render/Cargo.toml @@ -73,7 +73,7 @@ wgpu = { version = "0.19.1", default-features = false, features = [ "fragile-send-sync-non-atomic-wasm", ] } naga = { version = "0.19", features = ["wgsl-in"] } -naga_oil = { version = "0.12", default-features = false, features = [ +naga_oil = { version = "0.13", default-features = false, features = [ "test_shader", ] } serde = { version = "1", features = ["derive"] }
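
Note (editorial sketch, not part of the patches above): taken together, these commits turn pipeline creation into a small state machine. A pipeline starts out Queued, moves to Creating(Task<...>) once creation is spawned onto the AsyncComputeTaskPool, and becomes Ok(Pipeline) when that task is polled to completion via bevy_utils::futures::check_ready; on wasm32, macOS, or builds without the multi-threaded feature the work is instead blocked on immediately. The standalone Rust sketch below mirrors the per-frame, non-blocking polling pattern using only std. The names PipelineSlot, poll_once, and advance are illustrative stand-ins for CachedPipelineState, check_ready, and PipelineCache::process_pipeline, not code taken from the patch series.

use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll, RawWaker, RawWakerVTable, Waker};

// Illustrative stand-in for `CachedPipelineState`.
enum PipelineSlot<T> {
    Queued,
    Creating(Pin<Box<dyn Future<Output = T> + Send>>),
    Ready(T),
}

// A waker that does nothing; polling with it never blocks and never schedules a wakeup.
fn raw_noop_waker() -> RawWaker {
    fn noop(_: *const ()) {}
    fn clone(_: *const ()) -> RawWaker {
        raw_noop_waker()
    }
    static VTABLE: RawWakerVTable = RawWakerVTable::new(clone, noop, noop, noop);
    RawWaker::new(std::ptr::null(), &VTABLE)
}

fn noop_waker() -> Waker {
    // SAFETY: the vtable functions never touch the (null) data pointer.
    unsafe { Waker::from_raw(raw_noop_waker()) }
}

// Poll a future exactly once; the same idea as `bevy_utils::futures::check_ready`.
fn poll_once<T>(future: &mut Pin<Box<dyn Future<Output = T> + Send>>) -> Option<T> {
    let waker = noop_waker();
    let mut cx = Context::from_waker(&waker);
    match future.as_mut().poll(&mut cx) {
        Poll::Ready(value) => Some(value),
        Poll::Pending => None,
    }
}

// Called once per frame, analogous to `process_pipeline`: start queued work,
// check on in-flight work, and leave finished work alone. Never blocks the frame.
fn advance<T>(
    slot: &mut PipelineSlot<T>,
    spawn: impl FnOnce() -> Pin<Box<dyn Future<Output = T> + Send>>,
) {
    match slot {
        PipelineSlot::Queued => *slot = PipelineSlot::Creating(spawn()),
        PipelineSlot::Creating(task) => {
            if let Some(done) = poll_once(task) {
                *slot = PipelineSlot::Ready(done);
            }
        }
        PipelineSlot::Ready(_) => {}
    }
}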