[re_renderer] Interior mutable buffer/texture/bindgroup pools #1374

Merged · 10 commits · Feb 22, 2023
28 changes: 14 additions & 14 deletions crates/re_renderer/examples/framework.rs
@@ -237,7 +237,7 @@ impl<E: Example + 'static> Application<E> {
             .texture
             .create_view(&wgpu::TextureViewDescriptor::default());

-        let view_builders = self.example.draw(
+        let draw_results = self.example.draw(
             &mut self.re_ctx,
             [self.surface_config.width, self.surface_config.height],
             &self.time,
@@ -250,7 +250,7 @@ impl<E: Example + 'static> Application<E> {
             },
         );

-        let view_cmd_buffers = {
+        {
             let mut composite_pass =
                 composite_cmd_encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                     label: None,
@@ -265,22 +265,22 @@ impl<E: Example + 'static> Application<E> {
                     depth_stencil_attachment: None,
                 });

-            view_builders
-                .into_iter()
-                .map(|r| {
-                    r.view_builder
-                        .composite(&self.re_ctx, &mut composite_pass, r.target_location)
-                        .expect("Failed to composite view main surface");
-                    r.command_buffer
-                })
-                .collect::<Vec<_>>() // So we don't hold a reference to the render pass!
-
+            // drop the pass so we can finish() the main encoder!
+            for draw_result in &draw_results {
+                draw_result
+                    .view_builder
+                    .composite(
+                        &self.re_ctx,
+                        &mut composite_pass,
+                        draw_result.target_location,
+                    )
+                    .expect("Failed to composite view main surface");
+            }
         };

         self.re_ctx.queue.submit(
-            view_cmd_buffers
+            draw_results
                 .into_iter()
+                .map(|d| d.command_buffer)
                 .chain(std::iter::once(composite_cmd_encoder.finish())),
         );
         frame.present();
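The scoped block around composite_pass is load-bearing here: a wgpu render pass mutably borrows its encoder, and CommandEncoder::finish() takes the encoder by value, so the pass has to be dropped before the encoder can be finished and submitted. A reduced sketch of that constraint (device and queue assumed in scope, attachments elided):

    let mut encoder =
        device.create_command_encoder(&wgpu::CommandEncoderDescriptor::default());
    {
        // The pass mutably borrows `encoder` until it is dropped.
        let _pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
            label: None,
            color_attachments: &[], // a real pass needs at least one attachment
            depth_stencil_attachment: None,
        });
        // ... record composite draws here ...
    } // pass dropped, borrow on `encoder` released
    queue.submit(std::iter::once(encoder.finish()));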
9 changes: 7 additions & 2 deletions crates/re_renderer/src/context.rs
@@ -11,8 +11,6 @@ use crate::{
     FileResolver, FileServer, FileSystem, RecommendedFileResolver,
 };

-// ---
-
 /// Any resource involving wgpu rendering which can be re-used across different scenes.
 /// I.e. render pipelines, resource pools, etc.
 pub struct RenderContext {
@@ -25,6 +23,9 @@ pub struct RenderContext {
     #[cfg(all(not(target_arch = "wasm32"), debug_assertions))] // native debug build
     pub(crate) err_tracker: std::sync::Arc<crate::error_tracker::ErrorTracker>,

+    /// Utility type map that will be cleared every frame.
+    pub per_frame_data_helper: TypeMap,
+
     pub gpu_resources: WgpuResourcePools,
     pub mesh_manager: MeshManager,
     pub texture_manager_2d: TextureManager2D,
@@ -144,6 +145,9 @@ impl RenderContext {
             shared_renderer_data,

             renderers,
+
+            per_frame_data_helper: TypeMap::new(),
+
             gpu_resources,

             mesh_manager,
@@ -163,6 +167,7 @@ impl RenderContext {
     /// Updates internal book-keeping, frame allocators and executes delayed events like shader reloading.
     pub fn begin_frame(&mut self) {
         self.frame_index += 1;
+        self.per_frame_data_helper.clear();

         // Tick the error tracker so that it knows when to reset!
         // Note that we're ticking on begin_frame rather than raw frames, which
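per_frame_data_helper gives renderers a scratch space that lives exactly one frame; begin_frame wipes it before anything else runs. A hypothetical use, assuming the type_map crate's insert/get-by-type API; MyFrameCache and both functions are invented for illustration:

    struct MyFrameCache {
        visible_strips: Vec<u32>,
    }

    fn prepare(ctx: &mut RenderContext) {
        // Computed once per frame; any later code in the same frame can look it up by type.
        ctx.per_frame_data_helper
            .insert(MyFrameCache { visible_strips: vec![0, 1, 2] });
    }

    fn draw(ctx: &RenderContext) {
        if let Some(cache) = ctx.per_frame_data_helper.get::<MyFrameCache>() {
            // ... use cache.visible_strips; it is gone after the next begin_frame().
        }
    }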
11 changes: 5 additions & 6 deletions crates/re_renderer/src/global_bindings.rs
@@ -1,9 +1,8 @@
 use crate::{
     wgpu_buffer_types,
     wgpu_resources::{
-        BindGroupDesc, BindGroupEntry, BindGroupLayoutDesc, GpuBindGroupHandleStrong,
-        GpuBindGroupLayoutHandle, GpuBufferHandleStrong, GpuSamplerHandle, SamplerDesc,
-        WgpuResourcePools,
+        BindGroupDesc, BindGroupEntry, BindGroupLayoutDesc, GpuBindGroup, GpuBindGroupLayoutHandle,
+        GpuBuffer, GpuSamplerHandle, SamplerDesc, WgpuResourcePools,
     },
 };

@@ -129,16 +128,16 @@ impl GlobalBindings {
         &self,
         pools: &mut WgpuResourcePools,
         device: &wgpu::Device,
-        frame_uniform_buffer: &GpuBufferHandleStrong,
-    ) -> GpuBindGroupHandleStrong {
+        frame_uniform_buffer: &GpuBuffer,
+    ) -> GpuBindGroup {
         pools.bind_groups.alloc(
             device,
             // Needs to be kept in sync with `global_bindings.wgsl` / `self.layout`
             &BindGroupDesc {
                 label: "global bind group".into(),
                 entries: smallvec![
                     BindGroupEntry::Buffer {
-                        handle: **frame_uniform_buffer,
+                        handle: frame_uniform_buffer.handle,
                         offset: 0,
                         size: None,
                     },
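This signature change is the heart of the PR: instead of strong handles that need a pool lookup (and the `**frame_uniform_buffer` double-deref), callers now get resource objects that carry their pool handle and deref to the underlying wgpu resource. A minimal sketch of the idea, not the exact pool internals:

    pub struct GpuBuffer {
        pub handle: GpuBufferHandle,            // key back into the pool
        resource: std::sync::Arc<wgpu::Buffer>, // kept alive while anyone holds this
    }

    impl std::ops::Deref for GpuBuffer {
        type Target = wgpu::Buffer;
        fn deref(&self) -> &wgpu::Buffer {
            &self.resource
        }
    }

That deref is what lets later hunks pass &vertex_buffer_combined straight to Queue::write_buffer_with, while bind group descriptors keep using the plain .handle field.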
35 changes: 12 additions & 23 deletions crates/re_renderer/src/mesh.rs
@@ -8,8 +8,8 @@ use crate::{
     debug_label::DebugLabel,
     resource_managers::{GpuTexture2DHandle, ResourceManagerError, TextureManager2D},
     wgpu_resources::{
-        BindGroupDesc, BindGroupEntry, BufferDesc, GpuBindGroupHandleStrong,
-        GpuBindGroupLayoutHandle, GpuBufferHandleStrong, WgpuResourcePools,
+        BindGroupDesc, BindGroupEntry, BufferDesc, GpuBindGroup, GpuBindGroupLayoutHandle,
+        GpuBuffer, WgpuResourcePools,
     },
 };

@@ -103,11 +103,11 @@ pub struct Material {
 pub(crate) struct GpuMesh {
     // It would be desirable to put both vertex and index buffer into the same buffer, BUT
     // WebGL doesn't allow us to do so! (see https://github.com/gfx-rs/wgpu/pull/3157)
-    pub index_buffer: GpuBufferHandleStrong,
+    pub index_buffer: GpuBuffer,

     /// Buffer for all vertex data, subdivided in several sections for different vertex buffer bindings.
     /// See [`mesh_vertices`]
-    pub vertex_buffer_combined: GpuBufferHandleStrong,
+    pub vertex_buffer_combined: GpuBuffer,
     pub vertex_buffer_positions_range: Range<u64>,
     pub vertex_buffer_data_range: Range<u64>,

@@ -122,7 +122,7 @@ pub(crate) struct GpuMaterial {
     /// Index range within the owning [`Mesh`] that should be rendered with this material.
     pub index_range: Range<u32>,

-    pub bind_group: GpuBindGroupHandleStrong,
+    pub bind_group: GpuBindGroup,
 }

 pub(crate) mod gpu_data {
@@ -171,10 +171,7 @@ impl GpuMesh {
         );
         let mut staging_buffer = queue
             .write_buffer_with(
-                pools
-                    .buffers
-                    .get_resource(&vertex_buffer_combined)
-                    .map_err(ResourceManagerError::ResourcePoolError)?,
+                &vertex_buffer_combined,
                 0,
                 vertex_buffer_combined_size.try_into().unwrap(),
             )
@@ -184,6 +181,7 @@ impl GpuMesh {
             staging_buffer
                 [vertex_buffer_positions_size as usize..vertex_buffer_combined_size as usize]
                 .copy_from_slice(bytemuck::cast_slice(&data.vertex_data));
+            drop(staging_buffer);
             vertex_buffer_combined
         };

@@ -198,17 +196,11 @@ impl GpuMesh {
             },
         );
         let mut staging_buffer = queue
-            .write_buffer_with(
-                pools
-                    .buffers
-                    .get_resource(&index_buffer)
-                    .map_err(ResourceManagerError::ResourcePoolError)?,
-                0,
-                index_buffer_size.try_into().unwrap(),
-            )
+            .write_buffer_with(&index_buffer, 0, index_buffer_size.try_into().unwrap())
             .unwrap(); // Fails only if mapping is bigger than buffer size.
[Review comment] Member: unrelated, but there is a lot of scary unwraps in this code

[Reply] Member (PR author): all of those write_buffer_with will soon be replaced with the staging belt, making them at least look less scary 🙃
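For reference, the staging belt mentioned in the reply is wgpu::util::StagingBelt, which hands out mapped staging memory and copies it into the target buffer when the commands run. A rough sketch of one of these writes going through it, with encoder, device, and queue assumed from the surrounding frame code:

    let mut staging_belt = wgpu::util::StagingBelt::new(1024 * 1024); // chunk size in bytes

    staging_belt
        .write_buffer(
            &mut encoder,                          // command encoder for this frame
            &index_buffer,                         // target buffer
            0,                                     // destination offset
            index_buffer_size.try_into().unwrap(), // NonZeroU64 byte count
            &device,
        )
        .copy_from_slice(bytemuck::cast_slice(&data.indices));

    staging_belt.finish(); // unmap staging memory before submitting
    queue.submit(std::iter::once(encoder.finish()));
    staging_belt.recall(); // make the chunks reusable for the next frame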

             staging_buffer[..index_buffer_size as usize]
                 .copy_from_slice(bytemuck::cast_slice(&data.indices));
+            drop(staging_buffer);
             index_buffer
         };

@@ -229,10 +221,7 @@ impl GpuMesh {

         let mut materials_staging_buffer = queue
             .write_buffer_with(
-                pools
-                    .buffers
-                    .get_resource(&material_uniform_buffers)
-                    .unwrap(),
+                &material_uniform_buffers,
                 0,
                 NonZeroU64::new(combined_buffers_size).unwrap(),
             )
@@ -256,9 +245,9 @@ impl GpuMesh {
             &BindGroupDesc {
                 label: material.label.clone(),
                 entries: smallvec![
-                    BindGroupEntry::DefaultTextureView(**texture),
+                    BindGroupEntry::DefaultTextureView(texture.handle),
                     BindGroupEntry::Buffer {
-                        handle: *material_uniform_buffers,
+                        handle: material_uniform_buffers.handle,
                         offset: material_buffer_range_start as _,
                         size: NonZeroU64::new(std::mem::size_of::<
                             gpu_data::MaterialUniformBuffer,
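The added drop(staging_buffer) calls above are not cosmetic: write_buffer_with returns a mapped view that borrows the target buffer, and each block ends by moving the buffer out as its value, which the borrow checker only allows once the view is gone. The pattern in isolation (a sketch; upload and its parameters are invented names):

    fn upload(queue: &wgpu::Queue, buffer: wgpu::Buffer, bytes: &[u8]) -> wgpu::Buffer {
        let size = std::num::NonZeroU64::new(bytes.len() as u64).unwrap();
        let mut staging = queue
            .write_buffer_with(&buffer, 0, size) // mapped view borrowing `buffer`
            .unwrap(); // per the comment above: fails only if the mapping exceeds the buffer
        staging.copy_from_slice(bytes);
        drop(staging); // end the borrow...
        buffer // ...so the buffer can be moved out, exactly as in the hunks above
    }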
19 changes: 9 additions & 10 deletions crates/re_renderer/src/renderer/compositor.rs
@@ -2,9 +2,9 @@ use crate::{
     context::SharedRendererData,
     include_file,
     wgpu_resources::{
-        BindGroupDesc, BindGroupEntry, BindGroupLayoutDesc, GpuBindGroupHandleStrong,
-        GpuBindGroupLayoutHandle, GpuRenderPipelineHandle, GpuTextureHandleStrong,
-        PipelineLayoutDesc, RenderPipelineDesc, ShaderModuleDesc, WgpuResourcePools,
+        BindGroupDesc, BindGroupEntry, BindGroupLayoutDesc, GpuBindGroup, GpuBindGroupLayoutHandle,
+        GpuRenderPipelineHandle, GpuTexture, PipelineLayoutDesc, RenderPipelineDesc,
+        ShaderModuleDesc, WgpuResourcePools,
     },
 };

@@ -19,17 +19,17 @@ pub struct Compositor {

 #[derive(Clone)]
 pub struct CompositorDrawData {
-    /// [`GpuBindGroupHandleStrong`] pointing at the current image source and
+    /// [`GpuBindGroup`] pointing at the current image source and
     /// a uniform buffer for describing a tonemapper/compositor configuration.
-    bind_group: GpuBindGroupHandleStrong,
+    bind_group: GpuBindGroup,
 }

 impl DrawData for CompositorDrawData {
     type Renderer = Compositor;
 }

 impl CompositorDrawData {
-    pub fn new(ctx: &mut RenderContext, target: &GpuTextureHandleStrong) -> Self {
+    pub fn new(ctx: &mut RenderContext, target: &GpuTexture) -> Self {
         let pools = &mut ctx.gpu_resources;
         let compositor = ctx.renderers.get_or_create::<_, Compositor>(
             &ctx.shared_renderer_data,
@@ -42,7 +42,7 @@ impl CompositorDrawData {
             &ctx.device,
             &BindGroupDesc {
                 label: "compositor".into(),
-                entries: smallvec![BindGroupEntry::DefaultTextureView(**target)],
+                entries: smallvec![BindGroupEntry::DefaultTextureView(target.handle)],
                 layout: compositor.bind_group_layout,
             },
             &pools.bind_group_layouts,
@@ -130,13 +130,12 @@ impl Renderer for Compositor {
         &self,
         pools: &'a WgpuResourcePools,
         pass: &mut wgpu::RenderPass<'a>,
-        draw_data: &CompositorDrawData,
+        draw_data: &'a CompositorDrawData,
     ) -> anyhow::Result<()> {
         let pipeline = pools.render_pipelines.get_resource(self.render_pipeline)?;
-        let bind_group = pools.bind_groups.get_resource(&draw_data.bind_group)?;

         pass.set_pipeline(pipeline);
-        pass.set_bind_group(1, bind_group, &[]);
+        pass.set_bind_group(1, &draw_data.bind_group, &[]);
         pass.draw(0..3, 0..1);

         Ok(())
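The new &'a on draw_data is what makes the direct set_bind_group call compile: wgpu::RenderPass<'a> holds every bound resource for the lifetime of the pass, and the bind group is now owned by the draw data instead of being fetched from the pool per draw. Reduced to its core, relying on GpuBindGroup deref-ing to wgpu::BindGroup as the diff itself does:

    fn draw<'a>(pass: &mut wgpu::RenderPass<'a>, draw_data: &'a CompositorDrawData) {
        // The reference handed to set_bind_group must outlive the pass,
        // hence `&'a CompositorDrawData` rather than a fresh pool lookup.
        pass.set_bind_group(1, &draw_data.bind_group, &[]);
        pass.draw(0..3, 0..1);
    }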
39 changes: 14 additions & 25 deletions crates/re_renderer/src/renderer/lines.rs
@@ -119,7 +119,7 @@ use crate::{
     size::Size,
     view_builder::ViewBuilder,
     wgpu_resources::{
-        BindGroupDesc, BindGroupEntry, BindGroupLayoutDesc, BufferDesc, GpuBindGroupHandleStrong,
+        BindGroupDesc, BindGroupEntry, BindGroupLayoutDesc, BufferDesc, GpuBindGroup,
         GpuBindGroupLayoutHandle, GpuRenderPipelineHandle, PipelineLayoutDesc, PoolError,
         RenderPipelineDesc, ShaderModuleDesc, TextureDesc,
     },
@@ -170,15 +170,15 @@ pub mod gpu_data {
 /// Internal, ready to draw representation of [`LineBatchInfo`]
 #[derive(Clone)]
 struct LineStripBatch {
-    bind_group: GpuBindGroupHandleStrong,
+    bind_group: GpuBindGroup,
     vertex_range: Range<u32>,
 }

 /// A line drawing operation. Encompasses several lines, each consisting of a list of positions.
 /// Expected to be recrated every frame.
 #[derive(Clone)]
 pub struct LineDrawData {
-    bind_group_all_lines: Option<GpuBindGroupHandleStrong>,
+    bind_group_all_lines: Option<GpuBindGroup>,
     batches: Vec<LineStripBatch>,
 }

@@ -434,11 +434,7 @@ impl LineDrawData {
         // Upload data from staging buffers to gpu.
         ctx.queue.write_texture(
             wgpu::ImageCopyTexture {
-                texture: &ctx
-                    .gpu_resources
-                    .textures
-                    .get_resource(&position_data_texture)?
-                    .texture,
+                texture: &position_data_texture.texture,
                 mip_level: 0,
                 origin: wgpu::Origin3d::ZERO,
                 aspect: wgpu::TextureAspect::All,
@@ -459,11 +455,7 @@ impl LineDrawData {
         );
         ctx.queue.write_texture(
             wgpu::ImageCopyTexture {
-                texture: &ctx
-                    .gpu_resources
-                    .textures
-                    .get_resource(&line_strip_texture)?
-                    .texture,
+                texture: &line_strip_texture.texture,
                 mip_level: 0,
                 origin: wgpu::Origin3d::ZERO,
                 aspect: wgpu::TextureAspect::All,
@@ -488,8 +480,8 @@ impl LineDrawData {
             &BindGroupDesc {
                 label: "line draw data".into(),
                 entries: smallvec![
-                    BindGroupEntry::DefaultTextureView(*position_data_texture),
-                    BindGroupEntry::DefaultTextureView(*line_strip_texture),
+                    BindGroupEntry::DefaultTextureView(position_data_texture.handle),
+                    BindGroupEntry::DefaultTextureView(line_strip_texture.handle),
                 ],
                 layout: line_renderer.bind_group_layout_all_lines,
             },
@@ -505,7 +497,7 @@ impl LineDrawData {
         let allocation_size_per_uniform_buffer =
             uniform_buffer_allocation_size::<gpu_data::BatchUniformBuffer>(&ctx.device);
         let combined_buffers_size = allocation_size_per_uniform_buffer * batches.len() as u64;
-        let uniform_buffers_handle = ctx.gpu_resources.buffers.alloc(
+        let uniform_buffers = ctx.gpu_resources.buffers.alloc(
             &ctx.device,
             &BufferDesc {
                 label: "lines batch uniform buffers".into(),
@@ -517,10 +509,7 @@ impl LineDrawData {
         let mut staging_buffer = ctx
             .queue
             .write_buffer_with(
-                ctx.gpu_resources
-                    .buffers
-                    .get_resource(&uniform_buffers_handle)
-                    .unwrap(),
+                &uniform_buffers,
                 0,
                 NonZeroU64::new(combined_buffers_size).unwrap(),
             )
@@ -544,7 +533,7 @@ impl LineDrawData {
             &BindGroupDesc {
                 label: batch_info.label.clone(),
                 entries: smallvec![BindGroupEntry::Buffer {
-                    handle: *uniform_buffers_handle,
+                    handle: uniform_buffers.handle,
                     offset: offset as _,
                     size: NonZeroU64::new(
                         std::mem::size_of::<gpu_data::BatchUniformBuffer>() as _
@@ -704,19 +693,19 @@ impl Renderer for LineRenderer {
         &self,
         pools: &'a WgpuResourcePools,
         pass: &mut wgpu::RenderPass<'a>,
-        draw_data: &Self::RendererDrawData,
+        draw_data: &'a Self::RendererDrawData,
     ) -> anyhow::Result<()> {
         let Some(bind_group_all_lines) = &draw_data.bind_group_all_lines else {
             return Ok(()); // No lines submitted.
         };
-        let bind_group_line_data = pools.bind_groups.get_resource(bind_group_all_lines)?;
         let pipeline = pools.render_pipelines.get_resource(self.render_pipeline)?;

         pass.set_pipeline(pipeline);
-        pass.set_bind_group(1, bind_group_line_data, &[]);
+
+        pass.set_bind_group(1, bind_group_all_lines, &[]);

         for batch in &draw_data.batches {
-            pass.set_bind_group(2, pools.bind_groups.get_resource(&batch.bind_group)?, &[]);
+            pass.set_bind_group(2, &batch.bind_group, &[]);
             pass.draw(batch.vertex_range.clone(), 0..1);
         }

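lines.rs packs one uniform-buffer slot per batch into a single buffer, so each slot has to start at an offset the device accepts. The helper's definition is not part of this diff, but uniform_buffer_allocation_size presumably rounds the struct size up to the device's offset alignment, along these lines:

    fn uniform_buffer_allocation_size<T>(device: &wgpu::Device) -> u64 {
        let min_alignment = device.limits().min_uniform_buffer_offset_alignment as u64;
        let size = std::mem::size_of::<T>() as u64;
        // Round up so `i * allocation_size` is a legal offset for batch i.
        (size + min_alignment - 1) / min_alignment * min_alignment
    }

Each batch's bind group then points at uniform_buffers.handle with offset = batch_index * allocation_size, which is why the buffer is allocated as allocation_size_per_uniform_buffer * batches.len().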