diff --git a/.github/workflows/daily.yml b/.github/workflows/daily.yml index 7e6e7ceef2447..4fd4c37e8a69d 100644 --- a/.github/workflows/daily.yml +++ b/.github/workflows/daily.yml @@ -99,6 +99,8 @@ jobs: os_version: "12.0" - device: "Samsung Galaxy S23" os_version: "13.0" + - device: "Google Pixel 8" + os_version: "14.0" steps: - uses: actions/checkout@v4 diff --git a/Cargo.toml b/Cargo.toml index 51cb064ffe6cb..8dd252ffb8cd8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -232,6 +232,9 @@ accesskit_unix = ["bevy_internal/accesskit_unix"] # Enable assertions to check the validity of parameters passed to glam glam_assert = ["bevy_internal/glam_assert"] +# Enable assertions in debug builds to check the validity of parameters passed to glam +debug_glam_assert = ["bevy_internal/debug_glam_assert"] + # Include a default font, containing only ASCII characters, at the cost of a 20kB binary size increase default_font = ["bevy_internal/default_font"] @@ -241,11 +244,17 @@ shader_format_glsl = ["bevy_internal/shader_format_glsl"] # Enable support for shaders in SPIR-V shader_format_spirv = ["bevy_internal/shader_format_spirv"] -# Enable some limitations to be able to use WebGL2. If not enabled, it will default to WebGPU in Wasm +# Enable some limitations to be able to use WebGL2. If not enabled, it will default to WebGPU in Wasm. Please refer to the [WebGL2 and WebGPU](https://github.com/bevyengine/bevy/tree/latest/examples#webgl2-and-webgpu) section of the examples README for more information on how to run Wasm builds with WebGPU. webgl2 = ["bevy_internal/webgl"] +# Enables the built-in asset processor for processed assets. +asset_processor = ["bevy_internal/asset_processor"] + # Enables watching the filesystem for Bevy Asset hot-reloading -filesystem_watcher = ["bevy_internal/filesystem_watcher"] +file_watcher = ["bevy_internal/file_watcher"] + +# Enables watching in memory asset providers for Bevy Asset hot-reloading +embedded_watcher = ["bevy_internal/embedded_watcher"] [dependencies] bevy_dylib = { path = "crates/bevy_dylib", version = "0.12.0-dev", default-features = false, optional = true } @@ -1065,6 +1074,7 @@ wasm = true name = "hot_asset_reloading" path = "examples/asset/hot_asset_reloading.rs" doc-scrape-examples = true +required-features = ["file_watcher"] [package.metadata.example.hot_asset_reloading] name = "Hot Reloading of Assets" @@ -1076,7 +1086,7 @@ wasm = true name = "asset_processing" path = "examples/asset/processing/processing.rs" doc-scrape-examples = true -required-features = ["filesystem_watcher"] +required-features = ["file_watcher", "asset_processor"] [package.metadata.example.asset_processing] name = "Asset Processing" @@ -1394,6 +1404,16 @@ description = "Illustrates creating custom system parameters with `SystemParam`" category = "ECS (Entity Component System)" wasm = false +[[example]] +name = "time" +path = "examples/ecs/time.rs" + +[package.metadata.example.time] +name = "Time handling" +description = "Explains how Time is handled in ECS" +category = "ECS (Entity Component System)" +wasm = false + [[example]] name = "timers" path = "examples/ecs/timers.rs" @@ -1706,6 +1726,16 @@ description = "A shader and a material that uses it" category = "Shaders" wasm = true +[[example]] +name = "extended_material" +path = "examples/shader/extended_material.rs" + +[package.metadata.example.extended_material] +name = "Extended Material" +description = "A custom shader that builds on the standard material" +category = "Shaders" +wasm = true + [[example]] name = "shader_prepass" 
path = "examples/shader/shader_prepass.rs" diff --git a/README.md b/README.md index 9820b70393c6e..b551fe27baedc 100644 --- a/README.md +++ b/README.md @@ -1,9 +1,10 @@ # [![Bevy](assets/branding/bevy_logo_light_dark_and_dimmed.svg)](https://bevyengine.org) +[![License](https://img.shields.io/badge/license-MIT%2FApache-blue.svg)](https://github.com/bevyengine/bevy#license) [![Crates.io](https://img.shields.io/crates/v/bevy.svg)](https://crates.io/crates/bevy) -[![MIT/Apache 2.0](https://img.shields.io/badge/license-MIT%2FApache-blue.svg)](https://github.com/bevyengine/bevy#license) -[![Crates.io](https://img.shields.io/crates/d/bevy.svg)](https://crates.io/crates/bevy) -[![Rust](https://github.com/bevyengine/bevy/workflows/CI/badge.svg)](https://github.com/bevyengine/bevy/actions) +[![Downloads](https://img.shields.io/crates/d/bevy.svg)](https://crates.io/crates/bevy) +[![Docs](https://docs.rs/bevy/badge.svg)](https://docs.rs/bevy/latest/bevy/) +[![CI](https://github.com/bevyengine/bevy/workflows/CI/badge.svg)](https://github.com/bevyengine/bevy/actions) [![Discord](https://img.shields.io/discord/691052431525675048.svg?label=&logo=discord&logoColor=ffffff&color=7389D8&labelColor=6A7EC2)](https://discord.gg/bevy) ## What is Bevy? @@ -47,8 +48,11 @@ Before contributing or participating in discussions with the community, you shou * **[GitHub Discussions](https://github.com/bevyengine/bevy/discussions):** The best place for questions about Bevy, answered right here! * **[Bevy Assets](https://bevyengine.org/assets/):** A collection of awesome Bevy projects, tools, plugins and learning materials. +### Contributing + If you'd like to help build Bevy, check out the **[Contributor's Guide](https://github.com/bevyengine/bevy/blob/main/CONTRIBUTING.md)**. -For simple problems, feel free to open an issue or PR and tackle it yourself! +For simple problems, feel free to [open an issue](https://github.com/bevyengine/bevy/issues) or +[PR](https://github.com/bevyengine/bevy/pulls) and tackle it yourself! For more complex architecture decisions and experimental mad science, please open an [RFC](https://github.com/bevyengine/rfcs) (Request For Comments) so we can brainstorm together effectively! 
diff --git a/assets/shaders/animate_shader.wgsl b/assets/shaders/animate_shader.wgsl index addb1dee89b34..0e369674ac38e 100644 --- a/assets/shaders/animate_shader.wgsl +++ b/assets/shaders/animate_shader.wgsl @@ -1,6 +1,8 @@ // The time since startup data is in the globals binding which is part of the mesh_view_bindings import -#import bevy_pbr::mesh_view_bindings globals -#import bevy_pbr::forward_io VertexOutput +#import bevy_pbr::{ + mesh_view_bindings::globals, + forward_io::VertexOutput, +} fn oklab_to_linear_srgb(c: vec3) -> vec3 { let L = c.x; diff --git a/assets/shaders/array_texture.wgsl b/assets/shaders/array_texture.wgsl index 05b0b855313e6..24eaa1b549648 100644 --- a/assets/shaders/array_texture.wgsl +++ b/assets/shaders/array_texture.wgsl @@ -1,8 +1,10 @@ -#import bevy_pbr::forward_io VertexOutput -#import bevy_pbr::mesh_view_bindings view -#import bevy_pbr::pbr_types STANDARD_MATERIAL_FLAGS_DOUBLE_SIDED_BIT, PbrInput, pbr_input_new -#import bevy_core_pipeline::tonemapping tone_mapping -#import bevy_pbr::pbr_functions as fns +#import bevy_pbr::{ + forward_io::VertexOutput, + mesh_view_bindings::view, + pbr_types::{STANDARD_MATERIAL_FLAGS_DOUBLE_SIDED_BIT, PbrInput, pbr_input_new}, + pbr_functions as fns, +} +#import bevy_core_pipeline::tonemapping::tone_mapping @group(1) @binding(0) var my_array_texture: texture_2d_array; @group(1) @binding(1) var my_array_texture_sampler: sampler; @@ -46,5 +48,5 @@ fn fragment( ); pbr_input.V = fns::calculate_view(mesh.world_position, pbr_input.is_orthographic); - return tone_mapping(fns::pbr(pbr_input), view.color_grading); + return tone_mapping(fns::apply_pbr_lighting(pbr_input), view.color_grading); } diff --git a/assets/shaders/cubemap_unlit.wgsl b/assets/shaders/cubemap_unlit.wgsl index 56a5b005008e6..425eb2f5f2476 100644 --- a/assets/shaders/cubemap_unlit.wgsl +++ b/assets/shaders/cubemap_unlit.wgsl @@ -1,4 +1,4 @@ -#import bevy_pbr::forward_io VertexOutput +#import bevy_pbr::forward_io::VertexOutput #ifdef CUBEMAP_ARRAY @group(1) @binding(0) var base_color_texture: texture_cube_array; diff --git a/assets/shaders/custom_gltf_2d.wgsl b/assets/shaders/custom_gltf_2d.wgsl index 5e0a908c875d4..1ea793ad35696 100644 --- a/assets/shaders/custom_gltf_2d.wgsl +++ b/assets/shaders/custom_gltf_2d.wgsl @@ -1,6 +1,7 @@ -#import bevy_sprite::mesh2d_view_bindings globals -#import bevy_sprite::mesh2d_bindings mesh -#import bevy_sprite::mesh2d_functions get_model_matrix, mesh2d_position_local_to_clip +#import bevy_sprite::{ + mesh2d_view_bindings::globals, + mesh2d_functions::{get_model_matrix, mesh2d_position_local_to_clip}, +} struct Vertex { @builtin(instance_index) instance_index: u32, diff --git a/assets/shaders/custom_material.wgsl b/assets/shaders/custom_material.wgsl index b1c8d75ed9ff5..90322438e68d2 100644 --- a/assets/shaders/custom_material.wgsl +++ b/assets/shaders/custom_material.wgsl @@ -1,6 +1,6 @@ -#import bevy_pbr::forward_io VertexOutput +#import bevy_pbr::forward_io::VertexOutput // we can import items from shader modules in the assets folder with a quoted path -#import "shaders/custom_material_import.wgsl" COLOR_MULTIPLIER +#import "shaders/custom_material_import.wgsl"::COLOR_MULTIPLIER struct CustomMaterial { color: vec4, diff --git a/assets/shaders/custom_material_screenspace_texture.wgsl b/assets/shaders/custom_material_screenspace_texture.wgsl index 50fdfdd2b10c3..a4afd6422530b 100644 --- a/assets/shaders/custom_material_screenspace_texture.wgsl +++ b/assets/shaders/custom_material_screenspace_texture.wgsl @@ -1,6 +1,8 @@ 
-#import bevy_pbr::mesh_view_bindings view -#import bevy_pbr::forward_io VertexOutput -#import bevy_pbr::utils coords_to_viewport_uv +#import bevy_pbr::{ + mesh_view_bindings::view, + forward_io::VertexOutput, + utils::coords_to_viewport_uv, +} @group(1) @binding(0) var texture: texture_2d; @group(1) @binding(1) var texture_sampler: sampler; diff --git a/assets/shaders/custom_vertex_attribute.wgsl b/assets/shaders/custom_vertex_attribute.wgsl index 01f6af42c4cb8..d17246f89abf0 100644 --- a/assets/shaders/custom_vertex_attribute.wgsl +++ b/assets/shaders/custom_vertex_attribute.wgsl @@ -1,5 +1,4 @@ -#import bevy_pbr::mesh_bindings mesh -#import bevy_pbr::mesh_functions get_model_matrix, mesh_position_local_to_clip +#import bevy_pbr::mesh_functions::{get_model_matrix, mesh_position_local_to_clip} struct CustomMaterial { color: vec4, diff --git a/assets/shaders/extended_material.wgsl b/assets/shaders/extended_material.wgsl new file mode 100644 index 0000000000000..c6fd8aea9dd79 --- /dev/null +++ b/assets/shaders/extended_material.wgsl @@ -0,0 +1,59 @@ +#import bevy_pbr::{ + pbr_fragment::pbr_input_from_standard_material, + pbr_functions::alpha_discard, +} + +#ifdef PREPASS_PIPELINE +#import bevy_pbr::{ + prepass_io::{VertexOutput, FragmentOutput}, + pbr_deferred_functions::deferred_output, +} +#else +#import bevy_pbr::{ + forward_io::{VertexOutput, FragmentOutput}, + pbr_functions::{apply_pbr_lighting, main_pass_post_lighting_processing}, +} +#endif + +struct MyExtendedMaterial { + quantize_steps: u32, +} + +@group(1) @binding(100) +var my_extended_material: MyExtendedMaterial; + +@fragment +fn fragment( + in: VertexOutput, + @builtin(front_facing) is_front: bool, +) -> FragmentOutput { + // generate a PbrInput struct from the StandardMaterial bindings + var pbr_input = pbr_input_from_standard_material(in, is_front); + + // we can optionally modify the input before lighting and alpha_discard is applied + pbr_input.material.base_color.b = pbr_input.material.base_color.r; + + // alpha discard + pbr_input.material.base_color = alpha_discard(pbr_input.material, pbr_input.material.base_color); + +#ifdef PREPASS_PIPELINE + // in deferred mode we can't modify anything after that, as lighting is run in a separate fullscreen shader. + let out = deferred_output(in, pbr_input); +#else + var out: FragmentOutput; + // apply lighting + out.color = apply_pbr_lighting(pbr_input); + + // we can optionally modify the lit color before post-processing is applied + out.color = vec4(vec4(out.color * f32(my_extended_material.quantize_steps))) / f32(my_extended_material.quantize_steps); + + // apply in-shader post processing (fog, alpha-premultiply, and also tonemapping, debanding if the camera is non-hdr) + // note this does not include fullscreen postprocessing effects like bloom. 
+ out.color = main_pass_post_lighting_processing(pbr_input, out.color); + + // we can optionally modify the final result here + out.color = out.color * 2.0; +#endif + + return out; +} diff --git a/assets/shaders/fallback_image_test.wgsl b/assets/shaders/fallback_image_test.wgsl index f51d9961cde7a..59124786580ab 100644 --- a/assets/shaders/fallback_image_test.wgsl +++ b/assets/shaders/fallback_image_test.wgsl @@ -1,6 +1,4 @@ -#import bevy_pbr::mesh_view_bindings -#import bevy_pbr::mesh_bindings -#import bevy_pbr::forward_io VertexOutput +#import bevy_pbr::forward_io::VertexOutput @group(1) @binding(0) var test_texture_1d: texture_1d; @group(1) @binding(1) var test_texture_1d_sampler: sampler; diff --git a/assets/shaders/instancing.wgsl b/assets/shaders/instancing.wgsl index 52e7d9696af81..5491921a6e151 100644 --- a/assets/shaders/instancing.wgsl +++ b/assets/shaders/instancing.wgsl @@ -1,5 +1,4 @@ -#import bevy_pbr::mesh_functions get_model_matrix, mesh_position_local_to_clip -#import bevy_pbr::mesh_bindings mesh +#import bevy_pbr::mesh_functions::{get_model_matrix, mesh_position_local_to_clip} struct Vertex { @location(0) position: vec3, diff --git a/assets/shaders/line_material.wgsl b/assets/shaders/line_material.wgsl index ed06a27cd069f..e2ae15c1312c0 100644 --- a/assets/shaders/line_material.wgsl +++ b/assets/shaders/line_material.wgsl @@ -1,4 +1,4 @@ -#import bevy_pbr::forward_io VertexOutput +#import bevy_pbr::forward_io::VertexOutput struct LineMaterial { color: vec4, diff --git a/assets/shaders/post_processing.wgsl b/assets/shaders/post_processing.wgsl index 398c08d320893..37bc40c6fea7e 100644 --- a/assets/shaders/post_processing.wgsl +++ b/assets/shaders/post_processing.wgsl @@ -1,7 +1,5 @@ // This shader computes the chromatic aberration effect -#import bevy_pbr::utils - // Since post processing is a fullscreen effect, we use the fullscreen vertex shader provided by bevy. // This will import a vertex shader that renders a single fullscreen triangle. // @@ -20,7 +18,7 @@ // As you can see, the triangle ends up bigger than the screen. // // You don't need to worry about this too much since bevy will compute the correct UVs for you. 
-#import bevy_core_pipeline::fullscreen_vertex_shader FullscreenVertexOutput +#import bevy_core_pipeline::fullscreen_vertex_shader::FullscreenVertexOutput @group(0) @binding(0) var screen_texture: texture_2d; @group(0) @binding(1) var texture_sampler: sampler; diff --git a/assets/shaders/shader_defs.wgsl b/assets/shaders/shader_defs.wgsl index 0586b560c78ff..7b98daca05962 100644 --- a/assets/shaders/shader_defs.wgsl +++ b/assets/shaders/shader_defs.wgsl @@ -1,4 +1,4 @@ -#import bevy_pbr::forward_io VertexOutput +#import bevy_pbr::forward_io::VertexOutput struct CustomMaterial { color: vec4, diff --git a/assets/shaders/show_prepass.wgsl b/assets/shaders/show_prepass.wgsl index 881c5724d27b4..e80ea5c39d171 100644 --- a/assets/shaders/show_prepass.wgsl +++ b/assets/shaders/show_prepass.wgsl @@ -1,7 +1,8 @@ -#import bevy_pbr::mesh_types -#import bevy_pbr::mesh_view_bindings globals -#import bevy_pbr::prepass_utils -#import bevy_pbr::forward_io VertexOutput +#import bevy_pbr::{ + mesh_view_bindings::globals, + prepass_utils, + forward_io::VertexOutput, +} struct ShowPrepassSettings { show_depth: u32, diff --git a/assets/shaders/texture_binding_array.wgsl b/assets/shaders/texture_binding_array.wgsl index ee92b85b53231..440e3c6155700 100644 --- a/assets/shaders/texture_binding_array.wgsl +++ b/assets/shaders/texture_binding_array.wgsl @@ -1,4 +1,4 @@ -#import bevy_pbr::forward_io VertexOutput +#import bevy_pbr::forward_io::VertexOutput @group(1) @binding(0) var textures: binding_array>; @group(1) @binding(1) var nearest_sampler: sampler; diff --git a/assets/shaders/tonemapping_test_patterns.wgsl b/assets/shaders/tonemapping_test_patterns.wgsl index 7e64d699bef22..891a66f3a1f45 100644 --- a/assets/shaders/tonemapping_test_patterns.wgsl +++ b/assets/shaders/tonemapping_test_patterns.wgsl @@ -1,10 +1,11 @@ -#import bevy_pbr::mesh_view_bindings -#import bevy_pbr::mesh_bindings -#import bevy_pbr::forward_io VertexOutput -#import bevy_pbr::utils PI +#import bevy_pbr::{ + mesh_view_bindings, + forward_io::VertexOutput, + utils::PI, +} #ifdef TONEMAP_IN_SHADER -#import bevy_core_pipeline::tonemapping tone_mapping +#import bevy_core_pipeline::tonemapping::tone_mapping #endif // Sweep across hues on y axis with value from 0.0 to +15EV across x axis @@ -55,7 +56,7 @@ fn fragment( } var color = vec4(out, 1.0); #ifdef TONEMAP_IN_SHADER - color = tone_mapping(color, bevy_pbr::mesh_view_bindings::view.color_grading); + color = tone_mapping(color, mesh_view_bindings::view.color_grading); #endif return color; } diff --git a/benches/benches/bevy_ecs/iteration/heavy_compute.rs b/benches/benches/bevy_ecs/iteration/heavy_compute.rs index c1b8598b97e97..9a53092903f48 100644 --- a/benches/benches/bevy_ecs/iteration/heavy_compute.rs +++ b/benches/benches/bevy_ecs/iteration/heavy_compute.rs @@ -20,7 +20,7 @@ pub fn heavy_compute(c: &mut Criterion) { group.warm_up_time(std::time::Duration::from_millis(500)); group.measurement_time(std::time::Duration::from_secs(4)); group.bench_function("base", |b| { - ComputeTaskPool::init(TaskPool::default); + ComputeTaskPool::get_or_init(TaskPool::default); let mut world = World::default(); diff --git a/clippy.toml b/clippy.toml index 81951d92ec4d9..085da641febcb 100644 --- a/clippy.toml +++ b/clippy.toml @@ -1 +1 @@ -doc-valid-idents = ["sRGB", "NaN", "iOS", "glTF", "GitHub", "WebGPU", "GilRs"] +doc-valid-idents = ["sRGB", "NaN", "iOS", "glTF", "GitHub", "WebGL", "WebGPU", "GilRs"] diff --git a/crates/bevy_app/README.md b/crates/bevy_app/README.md new file mode 100644 index 
0000000000000..e38f98875029f --- /dev/null +++ b/crates/bevy_app/README.md @@ -0,0 +1,9 @@ +# Bevy App + +[![License](https://img.shields.io/badge/license-MIT%2FApache-blue.svg)](https://github.com/bevyengine/bevy#license) +[![Crates.io](https://img.shields.io/crates/v/bevy.svg)](https://crates.io/crates/bevy_app) +[![Downloads](https://img.shields.io/crates/d/bevy_app.svg)](https://crates.io/crates/bevy_app) +[![Docs](https://docs.rs/bevy_app/badge.svg)](https://docs.rs/bevy_app/latest/bevy_app/) +[![Discord](https://img.shields.io/discord/691052431525675048.svg?label=&logo=discord&logoColor=ffffff&color=7389D8&labelColor=6A7EC2)](https://discord.gg/bevy) + +This crate is about everything concerning the highest-level, application layer of a [Bevy](https://crates.io/crates/bevy) app. diff --git a/crates/bevy_app/src/app.rs b/crates/bevy_app/src/app.rs index 2b942f94eca67..3b206ce2deaea 100644 --- a/crates/bevy_app/src/app.rs +++ b/crates/bevy_app/src/app.rs @@ -76,6 +76,7 @@ pub struct App { plugin_name_added: HashSet, /// A private counter to prevent incorrect calls to `App::run()` from `Plugin::build()` building_plugin_depth: usize, + plugins_state: PluginsState, } impl Debug for App { @@ -194,6 +195,19 @@ impl Default for App { } } +/// Plugins state in the application +#[derive(PartialEq, Eq, Debug, Clone, Copy)] +pub enum PluginsState { + /// Plugins are being added. + Adding, + /// All plugins already added are ready. + Ready, + /// Finish has been executed for all plugins added. + Finished, + /// Cleanup has been executed for all plugins added. + Cleaned, +} + // Dummy plugin used to temporary hold the place in the plugin registry struct PlaceholderPlugin; impl Plugin for PlaceholderPlugin { @@ -221,6 +235,7 @@ impl App { plugin_name_added: Default::default(), main_schedule_label: Box::new(Main), building_plugin_depth: 0, + plugins_state: PluginsState::Adding, } } @@ -288,24 +303,37 @@ impl App { panic!("App::run() was called from within Plugin::build(), which is not allowed."); } + if app.plugins_state() == PluginsState::Ready { + // If we're already ready, we finish up now and advance one frame. + // This prevents black frames during the launch transition on iOS. + app.finish(); + app.cleanup(); + app.update(); + } + let runner = std::mem::replace(&mut app.runner, Box::new(run_once)); (runner)(app); } - /// Check that [`Plugin::ready`] of all plugins returns true. This is usually called by the + /// Check the state of all plugins already added to this app. This is usually called by the /// event loop, but can be useful for situations where you want to use [`App::update`] - pub fn ready(&self) -> bool { - for plugin in &self.plugin_registry { - if !plugin.ready(self) { - return false; + #[inline] + pub fn plugins_state(&self) -> PluginsState { + match self.plugins_state { + PluginsState::Adding => { + for plugin in &self.plugin_registry { + if !plugin.ready(self) { + return PluginsState::Adding; + } + } + PluginsState::Ready } + state => state, } - true } /// Run [`Plugin::finish`] for each plugin. This is usually called by the event loop once all - /// plugins are [`App::ready`], but can be useful for situations where you want to use - /// [`App::update`]. + /// plugins are ready, but can be useful for situations where you want to use [`App::update`]. pub fn finish(&mut self) { // temporarily remove the plugin registry to run each plugin's setup function on app. 
let plugin_registry = std::mem::take(&mut self.plugin_registry); @@ -313,6 +341,7 @@ impl App { plugin.finish(self); } self.plugin_registry = plugin_registry; + self.plugins_state = PluginsState::Finished; } /// Run [`Plugin::cleanup`] for each plugin. This is usually called by the event loop after @@ -324,6 +353,7 @@ impl App { plugin.cleanup(self); } self.plugin_registry = plugin_registry; + self.plugins_state = PluginsState::Cleaned; } /// Adds [`State`] and [`NextState`] resources, [`OnEnter`] and [`OnExit`] schedules @@ -688,6 +718,14 @@ impl App { /// [`PluginGroup`]:super::PluginGroup #[track_caller] pub fn add_plugins(&mut self, plugins: impl Plugins) -> &mut Self { + if matches!( + self.plugins_state(), + PluginsState::Cleaned | PluginsState::Finished + ) { + panic!( + "Plugins cannot be added after App::cleanup() or App::finish() has been called." + ); + } plugins.add_to_app(self); self } @@ -861,7 +899,7 @@ impl App { } /// When doing [ambiguity checking](bevy_ecs::schedule::ScheduleBuildSettings) this - /// ignores systems that are ambiguious on [`Component`] T. + /// ignores systems that are ambiguous on [`Component`] T. /// /// This settings only applies to the main world. To apply this to other worlds call the /// [corresponding method](World::allow_ambiguous_component) on World @@ -899,7 +937,7 @@ impl App { } /// When doing [ambiguity checking](bevy_ecs::schedule::ScheduleBuildSettings) this - /// ignores systems that are ambiguious on [`Resource`] T. + /// ignores systems that are ambiguous on [`Resource`] T. /// /// This settings only applies to the main world. To apply this to other worlds call the /// [corresponding method](World::allow_ambiguous_resource) on World @@ -939,14 +977,20 @@ impl App { } fn run_once(mut app: App) { - while !app.ready() { - #[cfg(not(target_arch = "wasm32"))] - bevy_tasks::tick_global_task_pools_on_main_thread(); + let plugins_state = app.plugins_state(); + if plugins_state != PluginsState::Cleaned { + while app.plugins_state() == PluginsState::Adding { + #[cfg(not(target_arch = "wasm32"))] + bevy_tasks::tick_global_task_pools_on_main_thread(); + } + app.finish(); + app.cleanup(); } - app.finish(); - app.cleanup(); - app.update(); + // if plugins where cleaned before the runner start, an update already ran + if plugins_state != PluginsState::Cleaned { + app.update(); + } } /// An event that indicates the [`App`] should exit. 
This will fully exit the app process at the diff --git a/crates/bevy_app/src/schedule_runner.rs b/crates/bevy_app/src/schedule_runner.rs index 0780a1b9a395a..18b2f0b61fb55 100644 --- a/crates/bevy_app/src/schedule_runner.rs +++ b/crates/bevy_app/src/schedule_runner.rs @@ -1,6 +1,7 @@ use crate::{ app::{App, AppExit}, plugin::Plugin, + PluginsState, }; use bevy_ecs::event::{Events, ManualEventReader}; use bevy_utils::{Duration, Instant}; @@ -71,17 +72,23 @@ impl Plugin for ScheduleRunnerPlugin { fn build(&self, app: &mut App) { let run_mode = self.run_mode; app.set_runner(move |mut app: App| { - while !app.ready() { - #[cfg(not(target_arch = "wasm32"))] - bevy_tasks::tick_global_task_pools_on_main_thread(); + let plugins_state = app.plugins_state(); + if plugins_state != PluginsState::Cleaned { + while app.plugins_state() == PluginsState::Adding { + #[cfg(not(target_arch = "wasm32"))] + bevy_tasks::tick_global_task_pools_on_main_thread(); + } + app.finish(); + app.cleanup(); } - app.finish(); - app.cleanup(); let mut app_exit_event_reader = ManualEventReader::::default(); match run_mode { RunMode::Once => { - app.update(); + // if plugins where cleaned before the runner start, an update already ran + if plugins_state != PluginsState::Cleaned { + app.update(); + } } RunMode::Loop { wait } => { let mut tick = move |app: &mut App, diff --git a/crates/bevy_asset/Cargo.toml b/crates/bevy_asset/Cargo.toml index c2877844f94fa..d67340effc47f 100644 --- a/crates/bevy_asset/Cargo.toml +++ b/crates/bevy_asset/Cargo.toml @@ -11,8 +11,11 @@ keywords = ["bevy"] # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [features] -filesystem_watcher = ["notify-debouncer-full"] +file_watcher = ["notify-debouncer-full", "watch"] +embedded_watcher = ["file_watcher"] multi-threaded = ["bevy_tasks/multi-threaded"] +asset_processor = [] +watch = [] [dependencies] bevy_app = { path = "../bevy_app", version = "0.12.0-dev" } @@ -30,7 +33,7 @@ crossbeam-channel = "0.5" downcast-rs = "1.2" futures-io = "0.3" futures-lite = "1.12" -md5 = "0.7" +blake3 = "1.5" parking_lot = { version = "0.12", features = ["arc_lock", "send_guard"] } ron = "0.8" serde = { version = "1", features = ["derive"] } diff --git a/crates/bevy_asset/src/io/android.rs b/crates/bevy_asset/src/io/android.rs index a07043c4dd4bc..3c5902e95592e 100644 --- a/crates/bevy_asset/src/io/android.rs +++ b/crates/bevy_asset/src/io/android.rs @@ -1,6 +1,5 @@ use crate::io::{ - get_meta_path, AssetReader, AssetReaderError, AssetWatcher, EmptyPathStream, PathStream, - Reader, VecReader, + get_meta_path, AssetReader, AssetReaderError, EmptyPathStream, PathStream, Reader, VecReader, }; use bevy_log::error; use bevy_utils::BoxedFuture; @@ -71,11 +70,4 @@ impl AssetReader for AndroidAssetReader { error!("Reading directories is not supported with the AndroidAssetReader"); Box::pin(async move { Ok(false) }) } - - fn watch_for_changes( - &self, - _event_sender: crossbeam_channel::Sender, - ) -> Option> { - None - } } diff --git a/crates/bevy_asset/src/io/embedded/embedded_watcher.rs b/crates/bevy_asset/src/io/embedded/embedded_watcher.rs new file mode 100644 index 0000000000000..6e92caa5d3bb3 --- /dev/null +++ b/crates/bevy_asset/src/io/embedded/embedded_watcher.rs @@ -0,0 +1,88 @@ +use crate::io::{ + file::{get_asset_path, get_base_path, new_asset_event_debouncer, FilesystemEventHandler}, + memory::Dir, + AssetSourceEvent, AssetWatcher, +}; +use bevy_log::warn; +use bevy_utils::{Duration, HashMap}; +use 
notify_debouncer_full::{notify::RecommendedWatcher, Debouncer, FileIdMap}; +use parking_lot::RwLock; +use std::{ + fs::File, + io::{BufReader, Read}, + path::{Path, PathBuf}, + sync::Arc, +}; + +/// A watcher for assets stored in the `embedded` asset source. Embedded assets are assets whose +/// bytes have been embedded into the Rust binary using the [`embedded_asset`](crate::embedded_asset) macro. +/// This watcher will watch for changes to the "source files", read the contents of changed files from the file system +/// and overwrite the initial static bytes of the file embedded in the binary with the new dynamically loaded bytes. +pub struct EmbeddedWatcher { + _watcher: Debouncer, +} + +impl EmbeddedWatcher { + pub fn new( + dir: Dir, + root_paths: Arc>>, + sender: crossbeam_channel::Sender, + debounce_wait_time: Duration, + ) -> Self { + let root = get_base_path(); + let handler = EmbeddedEventHandler { + dir, + root: root.clone(), + sender, + root_paths, + last_event: None, + }; + let watcher = new_asset_event_debouncer(root, debounce_wait_time, handler).unwrap(); + Self { _watcher: watcher } + } +} + +impl AssetWatcher for EmbeddedWatcher {} + +/// A [`FilesystemEventHandler`] that uses [`EmbeddedAssetRegistry`](crate::io::embedded::EmbeddedAssetRegistry) to hot-reload +/// binary-embedded Rust source files. This will read the contents of changed files from the file system and overwrite +/// the initial static bytes from the file embedded in the binary. +pub(crate) struct EmbeddedEventHandler { + sender: crossbeam_channel::Sender, + root_paths: Arc>>, + root: PathBuf, + dir: Dir, + last_event: Option, +} +impl FilesystemEventHandler for EmbeddedEventHandler { + fn begin(&mut self) { + self.last_event = None; + } + + fn get_path(&self, absolute_path: &Path) -> Option<(PathBuf, bool)> { + let (local_path, is_meta) = get_asset_path(&self.root, absolute_path); + let final_path = self.root_paths.read().get(&local_path)?.clone(); + if is_meta { + warn!("Meta file asset hot-reloading is not supported yet: {final_path:?}"); + } + Some((final_path, false)) + } + + fn handle(&mut self, absolute_paths: &[PathBuf], event: AssetSourceEvent) { + if self.last_event.as_ref() != Some(&event) { + if let AssetSourceEvent::ModifiedAsset(path) = &event { + if let Ok(file) = File::open(&absolute_paths[0]) { + let mut reader = BufReader::new(file); + let mut buffer = Vec::new(); + + // Read file into vector. + if reader.read_to_end(&mut buffer).is_ok() { + self.dir.insert_asset(path, buffer); + } + } + } + self.last_event = Some(event.clone()); + self.sender.send(event).unwrap(); + } + } +} diff --git a/crates/bevy_asset/src/io/embedded/mod.rs b/crates/bevy_asset/src/io/embedded/mod.rs new file mode 100644 index 0000000000000..e5470cd3d5c3f --- /dev/null +++ b/crates/bevy_asset/src/io/embedded/mod.rs @@ -0,0 +1,252 @@ +#[cfg(feature = "embedded_watcher")] +mod embedded_watcher; + +#[cfg(feature = "embedded_watcher")] +pub use embedded_watcher::*; + +use crate::io::{ + memory::{Dir, MemoryAssetReader, Value}, + AssetSource, AssetSourceBuilders, +}; +use bevy_ecs::system::Resource; +use std::path::{Path, PathBuf}; + +pub const EMBEDDED: &str = "embedded"; + +/// A [`Resource`] that manages "rust source files" in a virtual in memory [`Dir`], which is intended +/// to be shared with a [`MemoryAssetReader`]. +/// Generally this should not be interacted with directly. The [`embedded_asset`] will populate this. 
+/// +/// [`embedded_asset`]: crate::embedded_asset +#[derive(Resource, Default)] +pub struct EmbeddedAssetRegistry { + dir: Dir, + #[cfg(feature = "embedded_watcher")] + root_paths: std::sync::Arc< + parking_lot::RwLock>, + >, +} + +impl EmbeddedAssetRegistry { + /// Inserts a new asset. `full_path` is the full path (as [`file`] would return for that file, if it was capable of + /// running in a non-rust file). `asset_path` is the path that will be used to identify the asset in the `embedded` + /// [`AssetSource`]. `value` is the bytes that will be returned for the asset. This can be _either_ a `&'static [u8]` + /// or a [`Vec`]. + #[allow(unused)] + pub fn insert_asset(&self, full_path: PathBuf, asset_path: &Path, value: impl Into) { + #[cfg(feature = "embedded_watcher")] + self.root_paths + .write() + .insert(full_path.to_owned(), asset_path.to_owned()); + self.dir.insert_asset(asset_path, value); + } + + /// Inserts new asset metadata. `full_path` is the full path (as [`file`] would return for that file, if it was capable of + /// running in a non-rust file). `asset_path` is the path that will be used to identify the asset in the `embedded` + /// [`AssetSource`]. `value` is the bytes that will be returned for the asset. This can be _either_ a `&'static [u8]` + /// or a [`Vec`]. + #[allow(unused)] + pub fn insert_meta(&self, full_path: &Path, asset_path: &Path, value: impl Into) { + #[cfg(feature = "embedded_watcher")] + self.root_paths + .write() + .insert(full_path.to_owned(), asset_path.to_owned()); + self.dir.insert_meta(asset_path, value); + } + + /// Registers a `embedded` [`AssetSource`] that uses this [`EmbeddedAssetRegistry`]. + // NOTE: unused_mut because embedded_watcher feature is the only mutable consumer of `let mut source` + #[allow(unused_mut)] + pub fn register_source(&self, sources: &mut AssetSourceBuilders) { + let dir = self.dir.clone(); + let processed_dir = self.dir.clone(); + let mut source = AssetSource::build() + .with_reader(move || Box::new(MemoryAssetReader { root: dir.clone() })) + .with_processed_reader(move || { + Box::new(MemoryAssetReader { + root: processed_dir.clone(), + }) + }); + + #[cfg(feature = "embedded_watcher")] + { + let root_paths = self.root_paths.clone(); + let dir = self.dir.clone(); + let processed_root_paths = self.root_paths.clone(); + let processd_dir = self.dir.clone(); + source = source + .with_watcher(move |sender| { + Some(Box::new(EmbeddedWatcher::new( + dir.clone(), + root_paths.clone(), + sender, + std::time::Duration::from_millis(300), + ))) + }) + .with_processed_watcher(move |sender| { + Some(Box::new(EmbeddedWatcher::new( + processd_dir.clone(), + processed_root_paths.clone(), + sender, + std::time::Duration::from_millis(300), + ))) + }); + } + sources.insert(EMBEDDED, source); + } +} + +/// Returns the [`Path`] for a given `embedded` asset. +/// This is used internally by [`embedded_asset`] and can be used to get a [`Path`] +/// that matches the [`AssetPath`](crate::AssetPath) used by that asset. +/// +/// [`embedded_asset`]: crate::embedded_asset +#[macro_export] +macro_rules! 
embedded_path { + ($path_str: expr) => {{ + embedded_path!("/src/", $path_str) + }}; + + ($source_path: expr, $path_str: expr) => {{ + let crate_name = module_path!().split(':').next().unwrap(); + let after_src = file!().split($source_path).nth(1).unwrap(); + let file_path = std::path::Path::new(after_src) + .parent() + .unwrap() + .join($path_str); + std::path::Path::new(crate_name).join(file_path) + }}; +} + +/// Creates a new `embedded` asset by embedding the bytes of the given path into the current binary +/// and registering those bytes with the `embedded` [`AssetSource`]. +/// +/// This accepts the current [`App`](bevy_app::App) as the first parameter and a path `&str` (relative to the current file) as the second. +/// +/// By default this will generate an [`AssetPath`] using the following rules: +/// +/// 1. Search for the first `$crate_name/src/` in the path and trim to the path past that point. +/// 2. Re-add the current `$crate_name` to the front of the path +/// +/// For example, consider the following file structure in the theoretical `bevy_rock` crate, which provides a Bevy [`Plugin`](bevy_app::Plugin) +/// that renders fancy rocks for scenes. +/// +/// * `bevy_rock` +/// * `src` +/// * `render` +/// * `rock.wgsl` +/// * `mod.rs` +/// * `lib.rs` +/// * `Cargo.toml` +/// +/// `rock.wgsl` is a WGSL shader asset that the `bevy_rock` plugin author wants to bundle with their crate. They invoke the following +/// in `bevy_rock/src/render/mod.rs`: +/// +/// `embedded_asset!(app, "rock.wgsl")` +/// +/// `rock.wgsl` can now be loaded by the [`AssetServer`](crate::AssetServer) with the following path: +/// +/// ```no_run +/// # use bevy_asset::{Asset, AssetServer}; +/// # use bevy_reflect::TypePath; +/// # let asset_server: AssetServer = panic!(); +/// #[derive(Asset, TypePath)] +/// # struct Shader; +/// let shader = asset_server.load::("embedded://bevy_rock/render/rock.wgsl"); +/// ``` +/// +/// Some things to note in the path: +/// 1. The non-default `embedded:://` [`AssetSource`] +/// 2. `src` is trimmed from the path +/// +/// The default behavior also works for cargo workspaces. Pretend the `bevy_rock` crate now exists in a larger workspace in +/// `$SOME_WORKSPACE/crates/bevy_rock`. The asset path would remain the same, because [`embedded_asset`] searches for the +/// _first instance_ of `bevy_rock/src` in the path. +/// +/// For most "standard crate structures" the default works just fine. But for some niche cases (such as cargo examples), +/// the `src` path will not be present. You can override this behavior by adding it as the second argument to [`embedded_asset`]: +/// +/// `embedded_asset!(app, "/examples/rock_stuff/", "rock.wgsl")` +/// +/// When there are three arguments, the second argument will replace the default `/src/` value. Note that these two are +/// equivalent: +/// +/// `embedded_asset!(app, "rock.wgsl")` +/// `embedded_asset!(app, "/src/", "rock.wgsl")` +/// +/// This macro uses the [`include_bytes`] macro internally and _will not_ reallocate the bytes. +/// Generally the [`AssetPath`] generated will be predictable, but if your asset isn't +/// available for some reason, you can use the [`embedded_path`] macro to debug. +/// +/// Hot-reloading `embedded` assets is supported. Just enable the `embedded_watcher` cargo feature. +/// +/// [`AssetPath`]: crate::AssetPath +/// [`embedded_asset`]: crate::embedded_asset +/// [`embedded_path`]: crate::embedded_path +#[macro_export] +macro_rules! 
embedded_asset { + ($app: ident, $path: expr) => {{ + embedded_asset!($app, "/src/", $path) + }}; + + ($app: ident, $source_path: expr, $path: expr) => {{ + let mut embedded = $app + .world + .resource_mut::<$crate::io::embedded::EmbeddedAssetRegistry>(); + let path = $crate::embedded_path!($source_path, $path); + #[cfg(feature = "embedded_watcher")] + let full_path = std::path::Path::new(file!()).parent().unwrap().join($path); + #[cfg(not(feature = "embedded_watcher"))] + let full_path = std::path::PathBuf::new(); + embedded.insert_asset(full_path, &path, include_bytes!($path)); + }}; +} + +/// Loads an "internal" asset by embedding the string stored in the given `path_str` and associates it with the given handle. +#[macro_export] +macro_rules! load_internal_asset { + ($app: ident, $handle: expr, $path_str: expr, $loader: expr) => {{ + let mut assets = $app.world.resource_mut::<$crate::Assets<_>>(); + assets.insert($handle, ($loader)( + include_str!($path_str), + std::path::Path::new(file!()) + .parent() + .unwrap() + .join($path_str) + .to_string_lossy() + )); + }}; + // we can't support params without variadic arguments, so internal assets with additional params can't be hot-reloaded + ($app: ident, $handle: ident, $path_str: expr, $loader: expr $(, $param:expr)+) => {{ + let mut assets = $app.world.resource_mut::<$crate::Assets<_>>(); + assets.insert($handle, ($loader)( + include_str!($path_str), + std::path::Path::new(file!()) + .parent() + .unwrap() + .join($path_str) + .to_string_lossy(), + $($param),+ + )); + }}; +} + +/// Loads an "internal" binary asset by embedding the bytes stored in the given `path_str` and associates it with the given handle. +#[macro_export] +macro_rules! load_internal_binary_asset { + ($app: ident, $handle: expr, $path_str: expr, $loader: expr) => {{ + let mut assets = $app.world.resource_mut::<$crate::Assets<_>>(); + assets.insert( + $handle, + ($loader)( + include_bytes!($path_str).as_ref(), + std::path::Path::new(file!()) + .parent() + .unwrap() + .join($path_str) + .to_string_lossy() + .into(), + ), + ); + }}; +} diff --git a/crates/bevy_asset/src/io/file/file_watcher.rs b/crates/bevy_asset/src/io/file/file_watcher.rs index 7f2d622135932..d701b225beda7 100644 --- a/crates/bevy_asset/src/io/file/file_watcher.rs +++ b/crates/bevy_asset/src/io/file/file_watcher.rs @@ -13,6 +13,11 @@ use notify_debouncer_full::{ }; use std::path::{Path, PathBuf}; +/// An [`AssetWatcher`] that watches the filesystem for changes to asset files in a given root folder and emits [`AssetSourceEvent`] +/// for each relevant change. This uses [`notify_debouncer_full`] to retrieve "debounced" filesystem events. +/// "Debouncing" defines a time window to hold on to events and then removes duplicate events that fall into this window. +/// This introduces a small delay in processing events, but it helps reduce event duplicates. A small delay is also necessary +/// on some systems to avoid processing a change event before it has actually been applied. 
pub struct FileWatcher { _watcher: Debouncer, } @@ -23,159 +28,248 @@ impl FileWatcher { sender: Sender, debounce_wait_time: Duration, ) -> Result { - let owned_root = root.clone(); - let mut debouncer = new_debouncer( + let root = super::get_base_path().join(root); + let watcher = new_asset_event_debouncer( + root.clone(), debounce_wait_time, - None, - move |result: DebounceEventResult| { - match result { - Ok(events) => { - for event in events.iter() { - match event.kind { - notify::EventKind::Create(CreateKind::File) => { - let (path, is_meta) = - get_asset_path(&owned_root, &event.paths[0]); + FileEventHandler { + root, + sender, + last_event: None, + }, + )?; + Ok(FileWatcher { _watcher: watcher }) + } +} + +impl AssetWatcher for FileWatcher {} + +pub(crate) fn get_asset_path(root: &Path, absolute_path: &Path) -> (PathBuf, bool) { + let relative_path = absolute_path.strip_prefix(root).unwrap(); + let is_meta = relative_path + .extension() + .map(|e| e == "meta") + .unwrap_or(false); + let asset_path = if is_meta { + relative_path.with_extension("") + } else { + relative_path.to_owned() + }; + (asset_path, is_meta) +} + +/// This is a bit more abstracted than it normally would be because we want to try _very hard_ not to duplicate this +/// event management logic across filesystem-driven [`AssetWatcher`] impls. Each operating system / platform behaves +/// a little differently and this is the result of a delicate balancing act that we should only perform once. +pub(crate) fn new_asset_event_debouncer( + root: PathBuf, + debounce_wait_time: Duration, + mut handler: impl FilesystemEventHandler, +) -> Result, notify::Error> { + let root = super::get_base_path().join(root); + let mut debouncer = new_debouncer( + debounce_wait_time, + None, + move |result: DebounceEventResult| { + match result { + Ok(events) => { + handler.begin(); + for event in events.iter() { + match event.kind { + notify::EventKind::Create(CreateKind::File) => { + if let Some((path, is_meta)) = handler.get_path(&event.paths[0]) { if is_meta { - sender.send(AssetSourceEvent::AddedMeta(path)).unwrap(); + handler.handle( + &event.paths, + AssetSourceEvent::AddedMeta(path), + ); } else { - sender.send(AssetSourceEvent::AddedAsset(path)).unwrap(); + handler.handle( + &event.paths, + AssetSourceEvent::AddedAsset(path), + ); } } - notify::EventKind::Create(CreateKind::Folder) => { - let (path, _) = get_asset_path(&owned_root, &event.paths[0]); - sender.send(AssetSourceEvent::AddedFolder(path)).unwrap(); + } + notify::EventKind::Create(CreateKind::Folder) => { + if let Some((path, _)) = handler.get_path(&event.paths[0]) { + handler + .handle(&event.paths, AssetSourceEvent::AddedFolder(path)); } - notify::EventKind::Access(AccessKind::Close(AccessMode::Write)) => { - let (path, is_meta) = - get_asset_path(&owned_root, &event.paths[0]); + } + notify::EventKind::Access(AccessKind::Close(AccessMode::Write)) => { + if let Some((path, is_meta)) = handler.get_path(&event.paths[0]) { if is_meta { - sender.send(AssetSourceEvent::ModifiedMeta(path)).unwrap(); + handler.handle( + &event.paths, + AssetSourceEvent::ModifiedMeta(path), + ); } else { - sender.send(AssetSourceEvent::ModifiedAsset(path)).unwrap(); + handler.handle( + &event.paths, + AssetSourceEvent::ModifiedAsset(path), + ); } } - notify::EventKind::Remove(RemoveKind::Any) | - // Because this is debounced over a reasonable period of time, "From" events are assumed to be "dangling" without - // a follow up "To" event. 
Without debouncing, "From" -> "To" -> "Both" events are emitted for renames. - // If a From is dangling, it is assumed to be "removed" from the context of the asset system. - notify::EventKind::Modify(ModifyKind::Name(RenameMode::From)) => { - let (path, is_meta) = - get_asset_path(&owned_root, &event.paths[0]); - sender - .send(AssetSourceEvent::RemovedUnknown { path, is_meta }) - .unwrap(); + } + // Because this is debounced over a reasonable period of time, Modify(ModifyKind::Name(RenameMode::From) + // events are assumed to be "dangling" without a follow up "To" event. Without debouncing, "From" -> "To" -> "Both" + // events are emitted for renames. If a From is dangling, it is assumed to be "removed" from the context of the asset + // system. + notify::EventKind::Remove(RemoveKind::Any) + | notify::EventKind::Modify(ModifyKind::Name(RenameMode::From)) => { + if let Some((path, is_meta)) = handler.get_path(&event.paths[0]) { + handler.handle( + &event.paths, + AssetSourceEvent::RemovedUnknown { path, is_meta }, + ); } - notify::EventKind::Create(CreateKind::Any) - | notify::EventKind::Modify(ModifyKind::Name(RenameMode::To)) => { - let (path, is_meta) = - get_asset_path(&owned_root, &event.paths[0]); - let event = if event.paths[0].is_dir() { + } + notify::EventKind::Create(CreateKind::Any) + | notify::EventKind::Modify(ModifyKind::Name(RenameMode::To)) => { + if let Some((path, is_meta)) = handler.get_path(&event.paths[0]) { + let asset_event = if event.paths[0].is_dir() { AssetSourceEvent::AddedFolder(path) } else if is_meta { AssetSourceEvent::AddedMeta(path) } else { AssetSourceEvent::AddedAsset(path) }; - sender.send(event).unwrap(); + handler.handle(&event.paths, asset_event); } - notify::EventKind::Modify(ModifyKind::Name(RenameMode::Both)) => { - let (old_path, old_is_meta) = - get_asset_path(&owned_root, &event.paths[0]); - let (new_path, new_is_meta) = - get_asset_path(&owned_root, &event.paths[1]); - // only the new "real" path is considered a directory - if event.paths[1].is_dir() { - sender - .send(AssetSourceEvent::RenamedFolder { - old: old_path, - new: new_path, - }) - .unwrap(); - } else { - match (old_is_meta, new_is_meta) { - (true, true) => { - sender - .send(AssetSourceEvent::RenamedMeta { - old: old_path, - new: new_path, - }) - .unwrap(); - } - (false, false) => { - sender - .send(AssetSourceEvent::RenamedAsset { - old: old_path, - new: new_path, - }) - .unwrap(); - } - (true, false) => { - error!( - "Asset metafile {old_path:?} was changed to asset file {new_path:?}, which is not supported. Try restarting your app to see if configuration is still valid" + } + notify::EventKind::Modify(ModifyKind::Name(RenameMode::Both)) => { + let Some((old_path, old_is_meta)) = + handler.get_path(&event.paths[0]) + else { + continue; + }; + let Some((new_path, new_is_meta)) = + handler.get_path(&event.paths[1]) + else { + continue; + }; + // only the new "real" path is considered a directory + if event.paths[1].is_dir() { + handler.handle( + &event.paths, + AssetSourceEvent::RenamedFolder { + old: old_path, + new: new_path, + }, + ); + } else { + match (old_is_meta, new_is_meta) { + (true, true) => { + handler.handle( + &event.paths, + AssetSourceEvent::RenamedMeta { + old: old_path, + new: new_path, + }, ); - } - (false, true) => { - error!( - "Asset file {old_path:?} was changed to meta file {new_path:?}, which is not supported. 
Try restarting your app to see if configuration is still valid" + } + (false, false) => { + handler.handle( + &event.paths, + AssetSourceEvent::RenamedAsset { + old: old_path, + new: new_path, + }, ); - } + } + (true, false) => { + error!( + "Asset metafile {old_path:?} was changed to asset file {new_path:?}, which is not supported. Try restarting your app to see if configuration is still valid" + ); + } + (false, true) => { + error!( + "Asset file {old_path:?} was changed to meta file {new_path:?}, which is not supported. Try restarting your app to see if configuration is still valid" + ); } } } - notify::EventKind::Modify(_) => { - let (path, is_meta) = - get_asset_path(&owned_root, &event.paths[0]); - if event.paths[0].is_dir() { - // modified folder means nothing in this case - } else if is_meta { - sender.send(AssetSourceEvent::ModifiedMeta(path)).unwrap(); - } else { - sender.send(AssetSourceEvent::ModifiedAsset(path)).unwrap(); - }; - } - notify::EventKind::Remove(RemoveKind::File) => { - let (path, is_meta) = - get_asset_path(&owned_root, &event.paths[0]); - if is_meta { - sender.send(AssetSourceEvent::RemovedMeta(path)).unwrap(); - } else { - sender.send(AssetSourceEvent::RemovedAsset(path)).unwrap(); - } - } - notify::EventKind::Remove(RemoveKind::Folder) => { - let (path, _) = get_asset_path(&owned_root, &event.paths[0]); - sender.send(AssetSourceEvent::RemovedFolder(path)).unwrap(); + } + notify::EventKind::Modify(_) => { + let Some((path, is_meta)) = handler.get_path(&event.paths[0]) + else { + continue; + }; + if event.paths[0].is_dir() { + // modified folder means nothing in this case + } else if is_meta { + handler + .handle(&event.paths, AssetSourceEvent::ModifiedMeta(path)); + } else { + handler.handle( + &event.paths, + AssetSourceEvent::ModifiedAsset(path), + ); + }; + } + notify::EventKind::Remove(RemoveKind::File) => { + let Some((path, is_meta)) = handler.get_path(&event.paths[0]) + else { + continue; + }; + if is_meta { + handler + .handle(&event.paths, AssetSourceEvent::RemovedMeta(path)); + } else { + handler + .handle(&event.paths, AssetSourceEvent::RemovedAsset(path)); } - _ => {} } + notify::EventKind::Remove(RemoveKind::Folder) => { + let Some((path, _)) = handler.get_path(&event.paths[0]) else { + continue; + }; + handler.handle(&event.paths, AssetSourceEvent::RemovedFolder(path)); + } + _ => {} } } - Err(errors) => errors.iter().for_each(|error| { - error!("Encountered a filesystem watcher error {error:?}"); - }), } - }, - )?; - debouncer.watcher().watch(&root, RecursiveMode::Recursive)?; - debouncer.cache().add_root(&root, RecursiveMode::Recursive); - Ok(Self { - _watcher: debouncer, - }) - } + Err(errors) => errors.iter().for_each(|error| { + error!("Encountered a filesystem watcher error {error:?}"); + }), + } + }, + )?; + debouncer.watcher().watch(&root, RecursiveMode::Recursive)?; + debouncer.cache().add_root(&root, RecursiveMode::Recursive); + Ok(debouncer) } -impl AssetWatcher for FileWatcher {} +pub(crate) struct FileEventHandler { + sender: crossbeam_channel::Sender, + root: PathBuf, + last_event: Option, +} -pub(crate) fn get_asset_path(root: &Path, absolute_path: &Path) -> (PathBuf, bool) { - let relative_path = absolute_path.strip_prefix(root).unwrap(); - let is_meta = relative_path - .extension() - .map(|e| e == "meta") - .unwrap_or(false); - let asset_path = if is_meta { - relative_path.with_extension("") - } else { - relative_path.to_owned() - }; - (asset_path, is_meta) +impl FilesystemEventHandler for FileEventHandler { + fn begin(&mut self) 
{ + self.last_event = None; + } + fn get_path(&self, absolute_path: &Path) -> Option<(PathBuf, bool)> { + Some(get_asset_path(&self.root, absolute_path)) + } + + fn handle(&mut self, _absolute_paths: &[PathBuf], event: AssetSourceEvent) { + if self.last_event.as_ref() != Some(&event) { + self.last_event = Some(event.clone()); + self.sender.send(event).unwrap(); + } + } +} + +pub(crate) trait FilesystemEventHandler: Send + Sync + 'static { + /// Called each time a set of debounced events is processed + fn begin(&mut self); + /// Returns an actual asset path (if one exists for the given `absolute_path`), as well as a [`bool`] that is + /// true if the `absolute_path` corresponds to a meta file. + fn get_path(&self, absolute_path: &Path) -> Option<(PathBuf, bool)>; + /// Handle the given event + fn handle(&mut self, absolute_paths: &[PathBuf], event: AssetSourceEvent); } diff --git a/crates/bevy_asset/src/io/file/mod.rs b/crates/bevy_asset/src/io/file/mod.rs index 859db14eda710..629fd7dd9c659 100644 --- a/crates/bevy_asset/src/io/file/mod.rs +++ b/crates/bevy_asset/src/io/file/mod.rs @@ -1,9 +1,11 @@ -#[cfg(feature = "filesystem_watcher")] +#[cfg(feature = "file_watcher")] mod file_watcher; +#[cfg(feature = "file_watcher")] +pub use file_watcher::*; use crate::io::{ - get_meta_path, AssetReader, AssetReaderError, AssetWatcher, AssetWriter, AssetWriterError, - PathStream, Reader, Writer, + get_meta_path, AssetReader, AssetReaderError, AssetWriter, AssetWriterError, PathStream, + Reader, Writer, }; use async_fs::{read_dir, File}; use bevy_utils::BoxedFuture; @@ -164,23 +166,6 @@ impl AssetReader for FileAssetReader { Ok(metadata.file_type().is_dir()) }) } - - fn watch_for_changes( - &self, - _event_sender: crossbeam_channel::Sender, - ) -> Option> { - #[cfg(feature = "filesystem_watcher")] - return Some(Box::new( - file_watcher::FileWatcher::new( - self.root_path.clone(), - _event_sender, - std::time::Duration::from_millis(300), - ) - .unwrap(), - )); - #[cfg(not(feature = "filesystem_watcher"))] - return None; - } } pub struct FileAssetWriter { diff --git a/crates/bevy_asset/src/io/gated.rs b/crates/bevy_asset/src/io/gated.rs index f200483759d7c..2c96399c0c0f7 100644 --- a/crates/bevy_asset/src/io/gated.rs +++ b/crates/bevy_asset/src/io/gated.rs @@ -96,11 +96,4 @@ impl AssetReader for GatedReader { ) -> BoxedFuture<'a, std::result::Result> { self.reader.is_directory(path) } - - fn watch_for_changes( - &self, - event_sender: Sender, - ) -> Option> { - self.reader.watch_for_changes(event_sender) - } } diff --git a/crates/bevy_asset/src/io/memory.rs b/crates/bevy_asset/src/io/memory.rs index 043592435192c..3dca5042dbd52 100644 --- a/crates/bevy_asset/src/io/memory.rs +++ b/crates/bevy_asset/src/io/memory.rs @@ -40,25 +40,31 @@ impl Dir { self.insert_meta(path, asset.as_bytes().to_vec()); } - pub fn insert_asset(&self, path: &Path, asset: Vec) { + pub fn insert_asset(&self, path: &Path, value: impl Into) { let mut dir = self.clone(); if let Some(parent) = path.parent() { dir = self.get_or_insert_dir(parent); } dir.0.write().assets.insert( path.file_name().unwrap().to_string_lossy().to_string(), - Data(Arc::new((asset, path.to_owned()))), + Data { + value: value.into(), + path: path.to_owned(), + }, ); } - pub fn insert_meta(&self, path: &Path, asset: Vec) { + pub fn insert_meta(&self, path: &Path, value: impl Into) { let mut dir = self.clone(); if let Some(parent) = path.parent() { dir = self.get_or_insert_dir(parent); } dir.0.write().metadata.insert( 
path.file_name().unwrap().to_string_lossy().to_string(), - Data(Arc::new((asset, path.to_owned()))), + Data { + value: value.into(), + path: path.to_owned(), + }, ); } @@ -117,11 +123,16 @@ impl Dir { pub struct DirStream { dir: Dir, index: usize, + dir_index: usize, } impl DirStream { fn new(dir: Dir) -> Self { - Self { dir, index: 0 } + Self { + dir, + index: 0, + dir_index: 0, + } } } @@ -133,10 +144,17 @@ impl Stream for DirStream { _cx: &mut std::task::Context<'_>, ) -> Poll> { let this = self.get_mut(); - let index = this.index; - this.index += 1; let dir = this.dir.0.read(); - Poll::Ready(dir.assets.values().nth(index).map(|d| d.path().to_owned())) + + let dir_index = this.dir_index; + if let Some(dir_path) = dir.dirs.keys().nth(dir_index).map(|d| dir.path.join(d)) { + this.dir_index += 1; + Poll::Ready(Some(dir_path)) + } else { + let index = this.index; + this.index += 1; + Poll::Ready(dir.assets.values().nth(index).map(|d| d.path().to_owned())) + } } } @@ -149,14 +167,45 @@ pub struct MemoryAssetReader { /// Asset data stored in a [`Dir`]. #[derive(Clone, Debug)] -pub struct Data(Arc<(Vec, PathBuf)>); +pub struct Data { + path: PathBuf, + value: Value, +} + +/// Stores either an allocated vec of bytes or a static array of bytes. +#[derive(Clone, Debug)] +pub enum Value { + Vec(Arc>), + Static(&'static [u8]), +} impl Data { fn path(&self) -> &Path { - &self.0 .1 + &self.path } - fn data(&self) -> &[u8] { - &self.0 .0 + fn value(&self) -> &[u8] { + match &self.value { + Value::Vec(vec) => vec, + Value::Static(value) => value, + } + } +} + +impl From> for Value { + fn from(value: Vec) -> Self { + Self::Vec(Arc::new(value)) + } +} + +impl From<&'static [u8]> for Value { + fn from(value: &'static [u8]) -> Self { + Self::Static(value) + } +} + +impl From<&'static [u8; N]> for Value { + fn from(value: &'static [u8; N]) -> Self { + Self::Static(value) } } @@ -171,10 +220,11 @@ impl AsyncRead for DataReader { cx: &mut std::task::Context<'_>, buf: &mut [u8], ) -> std::task::Poll> { - if self.bytes_read >= self.data.data().len() { + if self.bytes_read >= self.data.value().len() { Poll::Ready(Ok(0)) } else { - let n = ready!(Pin::new(&mut &self.data.data()[self.bytes_read..]).poll_read(cx, buf))?; + let n = + ready!(Pin::new(&mut &self.data.value()[self.bytes_read..]).poll_read(cx, buf))?; self.bytes_read += n; Poll::Ready(Ok(n)) } @@ -196,7 +246,7 @@ impl AssetReader for MemoryAssetReader { }); reader }) - .ok_or(AssetReaderError::NotFound(PathBuf::new())) + .ok_or_else(|| AssetReaderError::NotFound(path.to_path_buf())) }) } @@ -214,7 +264,7 @@ impl AssetReader for MemoryAssetReader { }); reader }) - .ok_or(AssetReaderError::NotFound(PathBuf::new())) + .ok_or_else(|| AssetReaderError::NotFound(path.to_path_buf())) }) } @@ -229,7 +279,7 @@ impl AssetReader for MemoryAssetReader { let stream: Box = Box::new(DirStream::new(dir)); stream }) - .ok_or(AssetReaderError::NotFound(PathBuf::new())) + .ok_or_else(|| AssetReaderError::NotFound(path.to_path_buf())) }) } @@ -239,13 +289,6 @@ impl AssetReader for MemoryAssetReader { ) -> BoxedFuture<'a, std::result::Result> { Box::pin(async move { Ok(self.root.get_dir(path).is_some()) }) } - - fn watch_for_changes( - &self, - _event_sender: crossbeam_channel::Sender, - ) -> Option> { - None - } } #[cfg(test)] @@ -263,12 +306,12 @@ pub mod test { dir.insert_asset(a_path, a_data.clone()); let asset = dir.get_asset(a_path).unwrap(); assert_eq!(asset.path(), a_path); - assert_eq!(asset.data(), a_data); + assert_eq!(asset.value(), a_data); 
dir.insert_meta(a_path, a_meta.clone()); let meta = dir.get_metadata(a_path).unwrap(); assert_eq!(meta.path(), a_path); - assert_eq!(meta.data(), a_meta); + assert_eq!(meta.value(), a_meta); let b_path = Path::new("x/y/b.txt"); let b_data = "b".as_bytes().to_vec(); @@ -278,10 +321,10 @@ pub mod test { let asset = dir.get_asset(b_path).unwrap(); assert_eq!(asset.path(), b_path); - assert_eq!(asset.data(), b_data); + assert_eq!(asset.value(), b_data); let meta = dir.get_metadata(b_path).unwrap(); assert_eq!(meta.path(), b_path); - assert_eq!(meta.data(), b_meta); + assert_eq!(meta.value(), b_meta); } } diff --git a/crates/bevy_asset/src/io/mod.rs b/crates/bevy_asset/src/io/mod.rs index a29902c5837b2..14e52cddcb597 100644 --- a/crates/bevy_asset/src/io/mod.rs +++ b/crates/bevy_asset/src/io/mod.rs @@ -1,5 +1,6 @@ #[cfg(target_os = "android")] pub mod android; +pub mod embedded; #[cfg(not(target_arch = "wasm32"))] pub mod file; pub mod gated; @@ -8,13 +9,12 @@ pub mod processor_gated; #[cfg(target_arch = "wasm32")] pub mod wasm; -mod provider; +mod source; pub use futures_lite::{AsyncReadExt, AsyncWriteExt}; -pub use provider::*; +pub use source::*; use bevy_utils::BoxedFuture; -use crossbeam_channel::Sender; use futures_io::{AsyncRead, AsyncWrite}; use futures_lite::{ready, Stream}; use std::{ @@ -65,13 +65,6 @@ pub trait AssetReader: Send + Sync + 'static { path: &'a Path, ) -> BoxedFuture<'a, Result>; - /// Returns an Asset watcher that will send events on the given channel. - /// If this reader does not support watching for changes, this will return [`None`]. - fn watch_for_changes( - &self, - event_sender: Sender, - ) -> Option>; - /// Reads asset metadata bytes at the given `path` into a [`Vec`]. This is a convenience /// function that wraps [`AssetReader::read_meta`] by default. fn read_meta_bytes<'a>( @@ -179,7 +172,7 @@ pub trait AssetWriter: Send + Sync + 'static { } /// An "asset source change event" that occurs whenever asset (or asset metadata) is created/added/removed -#[derive(Clone, Debug)] +#[derive(Clone, Debug, PartialEq, Eq)] pub enum AssetSourceEvent { /// An asset at this path was added. AddedAsset(PathBuf), @@ -218,8 +211,6 @@ pub enum AssetSourceEvent { /// A handle to an "asset watcher" process, that will listen for and emit [`AssetSourceEvent`] values for as long as /// [`AssetWatcher`] has not been dropped. -/// -/// See [`AssetReader::watch_for_changes`]. pub trait AssetWatcher: Send + Sync + 'static {} /// An [`AsyncRead`] implementation capable of reading a [`Vec`]. diff --git a/crates/bevy_asset/src/io/processor_gated.rs b/crates/bevy_asset/src/io/processor_gated.rs index 1e578771f723e..1705900aeb9e1 100644 --- a/crates/bevy_asset/src/io/processor_gated.rs +++ b/crates/bevy_asset/src/io/processor_gated.rs @@ -1,5 +1,5 @@ use crate::{ - io::{AssetReader, AssetReaderError, PathStream, Reader}, + io::{AssetReader, AssetReaderError, AssetSourceId, PathStream, Reader}, processor::{AssetProcessorData, ProcessStatus}, AssetPath, }; @@ -15,13 +15,19 @@ use std::{path::Path, pin::Pin, sync::Arc}; /// [`AssetProcessor`]: crate::processor::AssetProcessor pub struct ProcessorGatedReader { reader: Box, + source: AssetSourceId<'static>, processor_data: Arc, } impl ProcessorGatedReader { /// Creates a new [`ProcessorGatedReader`]. 
- pub fn new(reader: Box, processor_data: Arc) -> Self { + pub fn new( + source: AssetSourceId<'static>, + reader: Box, + processor_data: Arc, + ) -> Self { Self { + source, processor_data, reader, } @@ -31,12 +37,12 @@ impl ProcessorGatedReader { /// while it is held. async fn get_transaction_lock( &self, - path: &Path, + path: &AssetPath<'static>, ) -> Result, AssetReaderError> { let infos = self.processor_data.asset_infos.read().await; let info = infos - .get(&AssetPath::from_path(path.to_path_buf())) - .ok_or_else(|| AssetReaderError::NotFound(path.to_owned()))?; + .get(path) + .ok_or_else(|| AssetReaderError::NotFound(path.path().to_owned()))?; Ok(info.file_transaction_lock.read_arc().await) } } @@ -47,20 +53,20 @@ impl AssetReader for ProcessorGatedReader { path: &'a Path, ) -> BoxedFuture<'a, Result>, AssetReaderError>> { Box::pin(async move { - trace!("Waiting for processing to finish before reading {:?}", path); - let process_result = self.processor_data.wait_until_processed(path).await; + let asset_path = AssetPath::from(path.to_path_buf()).with_source(self.source.clone()); + trace!("Waiting for processing to finish before reading {asset_path}"); + let process_result = self + .processor_data + .wait_until_processed(asset_path.clone()) + .await; match process_result { ProcessStatus::Processed => {} ProcessStatus::Failed | ProcessStatus::NonExistent => { - return Err(AssetReaderError::NotFound(path.to_owned())) + return Err(AssetReaderError::NotFound(path.to_owned())); } } - trace!( - "Processing finished with {:?}, reading {:?}", - process_result, - path - ); - let lock = self.get_transaction_lock(path).await?; + trace!("Processing finished with {asset_path}, reading {process_result:?}",); + let lock = self.get_transaction_lock(&asset_path).await?; let asset_reader = self.reader.read(path).await?; let reader: Box> = Box::new(TransactionLockedReader::new(asset_reader, lock)); @@ -73,23 +79,20 @@ impl AssetReader for ProcessorGatedReader { path: &'a Path, ) -> BoxedFuture<'a, Result>, AssetReaderError>> { Box::pin(async move { - trace!( - "Waiting for processing to finish before reading meta {:?}", - path - ); - let process_result = self.processor_data.wait_until_processed(path).await; + let asset_path = AssetPath::from(path.to_path_buf()).with_source(self.source.clone()); + trace!("Waiting for processing to finish before reading meta for {asset_path}",); + let process_result = self + .processor_data + .wait_until_processed(asset_path.clone()) + .await; match process_result { ProcessStatus::Processed => {} ProcessStatus::Failed | ProcessStatus::NonExistent => { return Err(AssetReaderError::NotFound(path.to_owned())); } } - trace!( - "Processing finished with {:?}, reading meta {:?}", - process_result, - path - ); - let lock = self.get_transaction_lock(path).await?; + trace!("Processing finished with {process_result:?}, reading meta for {asset_path}",); + let lock = self.get_transaction_lock(&asset_path).await?; let meta_reader = self.reader.read_meta(path).await?; let reader: Box> = Box::new(TransactionLockedReader::new(meta_reader, lock)); Ok(reader) @@ -127,13 +130,6 @@ impl AssetReader for ProcessorGatedReader { Ok(result) }) } - - fn watch_for_changes( - &self, - event_sender: crossbeam_channel::Sender, - ) -> Option> { - self.reader.watch_for_changes(event_sender) - } } /// An [`AsyncRead`] impl that will hold its asset's transaction lock until [`TransactionLockedReader`] is dropped. 
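With `watch_for_changes` removed from `AssetReader` across the readers above, a custom reader now only implements the four read methods; change detection is wired per asset source instead. A minimal sketch against the trait as it stands after this patch, where `NullAssetReader` is a hypothetical stand-in that reports everything as missing:

```rust
use bevy_asset::io::{AssetReader, AssetReaderError, PathStream, Reader};
use bevy_asset::BoxedFuture;
use std::path::Path;

/// Hypothetical stand-in reader: note there is no `watch_for_changes` to implement.
struct NullAssetReader;

impl AssetReader for NullAssetReader {
    fn read<'a>(
        &'a self,
        path: &'a Path,
    ) -> BoxedFuture<'a, Result<Box<Reader<'a>>, AssetReaderError>> {
        Box::pin(async move { Err(AssetReaderError::NotFound(path.to_path_buf())) })
    }

    fn read_meta<'a>(
        &'a self,
        path: &'a Path,
    ) -> BoxedFuture<'a, Result<Box<Reader<'a>>, AssetReaderError>> {
        Box::pin(async move { Err(AssetReaderError::NotFound(path.to_path_buf())) })
    }

    fn read_directory<'a>(
        &'a self,
        path: &'a Path,
    ) -> BoxedFuture<'a, Result<Box<PathStream>, AssetReaderError>> {
        Box::pin(async move { Err(AssetReaderError::NotFound(path.to_path_buf())) })
    }

    fn is_directory<'a>(
        &'a self,
        _path: &'a Path,
    ) -> BoxedFuture<'a, Result<bool, AssetReaderError>> {
        Box::pin(async move { Ok(false) })
    }
}
```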
diff --git a/crates/bevy_asset/src/io/provider.rs b/crates/bevy_asset/src/io/provider.rs deleted file mode 100644 index d41d8248ce042..0000000000000 --- a/crates/bevy_asset/src/io/provider.rs +++ /dev/null @@ -1,190 +0,0 @@ -use bevy_ecs::system::Resource; -use bevy_utils::HashMap; - -use crate::{ - io::{AssetReader, AssetWriter}, - AssetPlugin, -}; - -/// A reference to an "asset provider", which maps to an [`AssetReader`] and/or [`AssetWriter`]. -#[derive(Default, Clone, Debug)] -pub enum AssetProvider { - /// The default asset provider - #[default] - Default, - /// A custom / named asset provider - Custom(String), -} - -/// A [`Resource`] that hold (repeatable) functions capable of producing new [`AssetReader`] and [`AssetWriter`] instances -/// for a given [`AssetProvider`]. -#[derive(Resource, Default)] -pub struct AssetProviders { - readers: HashMap Box + Send + Sync>>, - writers: HashMap Box + Send + Sync>>, - default_file_source: Option, - default_file_destination: Option, -} - -impl AssetProviders { - /// Inserts a new `get_reader` function with the given `provider` name. This function will be used to create new [`AssetReader`]s - /// when they are requested for the given `provider`. - pub fn insert_reader( - &mut self, - provider: &str, - get_reader: impl FnMut() -> Box + Send + Sync + 'static, - ) { - self.readers - .insert(provider.to_string(), Box::new(get_reader)); - } - /// Inserts a new `get_reader` function with the given `provider` name. This function will be used to create new [`AssetReader`]s - /// when they are requested for the given `provider`. - pub fn with_reader( - mut self, - provider: &str, - get_reader: impl FnMut() -> Box + Send + Sync + 'static, - ) -> Self { - self.insert_reader(provider, get_reader); - self - } - /// Inserts a new `get_writer` function with the given `provider` name. This function will be used to create new [`AssetWriter`]s - /// when they are requested for the given `provider`. - pub fn insert_writer( - &mut self, - provider: &str, - get_writer: impl FnMut() -> Box + Send + Sync + 'static, - ) { - self.writers - .insert(provider.to_string(), Box::new(get_writer)); - } - /// Inserts a new `get_writer` function with the given `provider` name. This function will be used to create new [`AssetWriter`]s - /// when they are requested for the given `provider`. - pub fn with_writer( - mut self, - provider: &str, - get_writer: impl FnMut() -> Box + Send + Sync + 'static, - ) -> Self { - self.insert_writer(provider, get_writer); - self - } - /// Returns the default "asset source" path for the [`FileAssetReader`] and [`FileAssetWriter`]. - /// - /// [`FileAssetReader`]: crate::io::file::FileAssetReader - /// [`FileAssetWriter`]: crate::io::file::FileAssetWriter - pub fn default_file_source(&self) -> &str { - self.default_file_source - .as_deref() - .unwrap_or(AssetPlugin::DEFAULT_FILE_SOURCE) - } - - /// Sets the default "asset source" path for the [`FileAssetReader`] and [`FileAssetWriter`]. - /// - /// [`FileAssetReader`]: crate::io::file::FileAssetReader - /// [`FileAssetWriter`]: crate::io::file::FileAssetWriter - pub fn with_default_file_source(mut self, path: String) -> Self { - self.default_file_source = Some(path); - self - } - - /// Sets the default "asset destination" path for the [`FileAssetReader`] and [`FileAssetWriter`]. 
- /// - /// [`FileAssetReader`]: crate::io::file::FileAssetReader - /// [`FileAssetWriter`]: crate::io::file::FileAssetWriter - pub fn with_default_file_destination(mut self, path: String) -> Self { - self.default_file_destination = Some(path); - self - } - - /// Returns the default "asset destination" path for the [`FileAssetReader`] and [`FileAssetWriter`]. - /// - /// [`FileAssetReader`]: crate::io::file::FileAssetReader - /// [`FileAssetWriter`]: crate::io::file::FileAssetWriter - pub fn default_file_destination(&self) -> &str { - self.default_file_destination - .as_deref() - .unwrap_or(AssetPlugin::DEFAULT_FILE_DESTINATION) - } - - /// Returns a new "source" [`AssetReader`] for the given [`AssetProvider`]. - pub fn get_source_reader(&mut self, provider: &AssetProvider) -> Box { - match provider { - AssetProvider::Default => { - #[cfg(all(not(target_arch = "wasm32"), not(target_os = "android")))] - let reader = super::file::FileAssetReader::new(self.default_file_source()); - #[cfg(target_arch = "wasm32")] - let reader = super::wasm::HttpWasmAssetReader::new(self.default_file_source()); - #[cfg(target_os = "android")] - let reader = super::android::AndroidAssetReader; - Box::new(reader) - } - AssetProvider::Custom(provider) => { - let get_reader = self - .readers - .get_mut(provider) - .unwrap_or_else(|| panic!("Asset Provider {} does not exist", provider)); - (get_reader)() - } - } - } - /// Returns a new "destination" [`AssetReader`] for the given [`AssetProvider`]. - pub fn get_destination_reader(&mut self, provider: &AssetProvider) -> Box { - match provider { - AssetProvider::Default => { - #[cfg(all(not(target_arch = "wasm32"), not(target_os = "android")))] - let reader = super::file::FileAssetReader::new(self.default_file_destination()); - #[cfg(target_arch = "wasm32")] - let reader = super::wasm::HttpWasmAssetReader::new(self.default_file_destination()); - #[cfg(target_os = "android")] - let reader = super::android::AndroidAssetReader; - Box::new(reader) - } - AssetProvider::Custom(provider) => { - let get_reader = self - .readers - .get_mut(provider) - .unwrap_or_else(|| panic!("Asset Provider {} does not exist", provider)); - (get_reader)() - } - } - } - /// Returns a new "source" [`AssetWriter`] for the given [`AssetProvider`]. - pub fn get_source_writer(&mut self, provider: &AssetProvider) -> Box { - match provider { - AssetProvider::Default => { - #[cfg(all(not(target_arch = "wasm32"), not(target_os = "android")))] - return Box::new(super::file::FileAssetWriter::new( - self.default_file_source(), - )); - #[cfg(any(target_arch = "wasm32", target_os = "android"))] - panic!("Writing assets isn't supported on this platform yet"); - } - AssetProvider::Custom(provider) => { - let get_writer = self - .writers - .get_mut(provider) - .unwrap_or_else(|| panic!("Asset Provider {} does not exist", provider)); - (get_writer)() - } - } - } - /// Returns a new "destination" [`AssetWriter`] for the given [`AssetProvider`]. 
- pub fn get_destination_writer(&mut self, provider: &AssetProvider) -> Box { - match provider { - AssetProvider::Default => { - #[cfg(all(not(target_arch = "wasm32"), not(target_os = "android")))] - return Box::new(super::file::FileAssetWriter::new( - self.default_file_destination(), - )); - #[cfg(any(target_arch = "wasm32", target_os = "android"))] - panic!("Writing assets isn't supported on this platform yet"); - } - AssetProvider::Custom(provider) => { - let get_writer = self - .writers - .get_mut(provider) - .unwrap_or_else(|| panic!("Asset Provider {} does not exist", provider)); - (get_writer)() - } - } - } -} diff --git a/crates/bevy_asset/src/io/source.rs b/crates/bevy_asset/src/io/source.rs new file mode 100644 index 0000000000000..8ee192462a31c --- /dev/null +++ b/crates/bevy_asset/src/io/source.rs @@ -0,0 +1,558 @@ +use crate::{ + io::{ + processor_gated::ProcessorGatedReader, AssetReader, AssetSourceEvent, AssetWatcher, + AssetWriter, + }, + processor::AssetProcessorData, +}; +use bevy_ecs::system::Resource; +use bevy_log::{error, warn}; +use bevy_utils::{CowArc, Duration, HashMap}; +use std::{fmt::Display, hash::Hash, sync::Arc}; +use thiserror::Error; + +/// A reference to an "asset source", which maps to an [`AssetReader`] and/or [`AssetWriter`]. +/// +/// * [`AssetSourceId::Default`] corresponds to "default asset paths" that don't specify a source: `/path/to/asset.png` +/// * [`AssetSourceId::Name`] corresponds to asset paths that _do_ specify a source: `remote://path/to/asset.png`, where `remote` is the name. +#[derive(Default, Clone, Debug, Eq)] +pub enum AssetSourceId<'a> { + /// The default asset source. + #[default] + Default, + /// A non-default named asset source. + Name(CowArc<'a, str>), +} + +impl<'a> Display for AssetSourceId<'a> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self.as_str() { + None => write!(f, "AssetSourceId::Default"), + Some(v) => write!(f, "AssetSourceId::Name({v})"), + } + } +} + +impl<'a> AssetSourceId<'a> { + /// Creates a new [`AssetSourceId`] + pub fn new(source: Option>>) -> AssetSourceId<'a> { + match source { + Some(source) => AssetSourceId::Name(source.into()), + None => AssetSourceId::Default, + } + } + + /// Returns [`None`] if this is [`AssetSourceId::Default`] and [`Some`] containing the + /// the name if this is [`AssetSourceId::Name`]. + pub fn as_str(&self) -> Option<&str> { + match self { + AssetSourceId::Default => None, + AssetSourceId::Name(v) => Some(v), + } + } + + /// If this is not already an owned / static id, create one. Otherwise, it will return itself (with a static lifetime). + pub fn into_owned(self) -> AssetSourceId<'static> { + match self { + AssetSourceId::Default => AssetSourceId::Default, + AssetSourceId::Name(v) => AssetSourceId::Name(v.into_owned()), + } + } + + /// Clones into an owned [`AssetSourceId<'static>`]. + /// This is equivalent to `.clone().into_owned()`. 
+ #[inline] + pub fn clone_owned(&self) -> AssetSourceId<'static> { + self.clone().into_owned() + } +} + +impl From<&'static str> for AssetSourceId<'static> { + fn from(value: &'static str) -> Self { + AssetSourceId::Name(value.into()) + } +} + +impl<'a, 'b> From<&'a AssetSourceId<'b>> for AssetSourceId<'b> { + fn from(value: &'a AssetSourceId<'b>) -> Self { + value.clone() + } +} + +impl From> for AssetSourceId<'static> { + fn from(value: Option<&'static str>) -> Self { + match value { + Some(value) => AssetSourceId::Name(value.into()), + None => AssetSourceId::Default, + } + } +} + +impl From for AssetSourceId<'static> { + fn from(value: String) -> Self { + AssetSourceId::Name(value.into()) + } +} + +impl<'a> Hash for AssetSourceId<'a> { + fn hash(&self, state: &mut H) { + self.as_str().hash(state); + } +} + +impl<'a> PartialEq for AssetSourceId<'a> { + fn eq(&self, other: &Self) -> bool { + self.as_str().eq(&other.as_str()) + } +} + +/// Metadata about an "asset source", such as how to construct the [`AssetReader`] and [`AssetWriter`] for the source, +/// and whether or not the source is processed. +#[derive(Default)] +pub struct AssetSourceBuilder { + pub reader: Option Box + Send + Sync>>, + pub writer: Option Option> + Send + Sync>>, + pub watcher: Option< + Box< + dyn FnMut(crossbeam_channel::Sender) -> Option> + + Send + + Sync, + >, + >, + pub processed_reader: Option Box + Send + Sync>>, + pub processed_writer: Option Option> + Send + Sync>>, + pub processed_watcher: Option< + Box< + dyn FnMut(crossbeam_channel::Sender) -> Option> + + Send + + Sync, + >, + >, +} + +impl AssetSourceBuilder { + /// Builds a new [`AssetSource`] with the given `id`. If `watch` is true, the unprocessed source will watch for changes. + /// If `watch_processed` is true, the processed source will watch for changes. + pub fn build( + &mut self, + id: AssetSourceId<'static>, + watch: bool, + watch_processed: bool, + ) -> Option { + let reader = (self.reader.as_mut()?)(); + let writer = self.writer.as_mut().and_then(|w| (w)()); + let processed_writer = self.processed_writer.as_mut().and_then(|w| (w)()); + let mut source = AssetSource { + id: id.clone(), + reader, + writer, + processed_reader: self.processed_reader.as_mut().map(|r| (r)()), + processed_writer, + event_receiver: None, + watcher: None, + processed_event_receiver: None, + processed_watcher: None, + }; + + if watch { + let (sender, receiver) = crossbeam_channel::unbounded(); + match self.watcher.as_mut().and_then(|w|(w)(sender)) { + Some(w) => { + source.watcher = Some(w); + source.event_receiver = Some(receiver); + }, + None => warn!("{id} does not have an AssetWatcher configured. Consider enabling the `file_watcher` feature. Note that Web and Android do not currently support watching assets."), + } + } + + if watch_processed { + let (sender, receiver) = crossbeam_channel::unbounded(); + match self.processed_watcher.as_mut().and_then(|w|(w)(sender)) { + Some(w) => { + source.processed_watcher = Some(w); + source.processed_event_receiver = Some(receiver); + }, + None => warn!("{id} does not have a processed AssetWatcher configured. Consider enabling the `file_watcher` feature. Note that Web and Android do not currently support watching assets."), + } + } + Some(source) + } + + /// Will use the given `reader` function to construct unprocessed [`AssetReader`] instances. 
+ pub fn with_reader( + mut self, + reader: impl FnMut() -> Box + Send + Sync + 'static, + ) -> Self { + self.reader = Some(Box::new(reader)); + self + } + + /// Will use the given `writer` function to construct unprocessed [`AssetWriter`] instances. + pub fn with_writer( + mut self, + writer: impl FnMut() -> Option> + Send + Sync + 'static, + ) -> Self { + self.writer = Some(Box::new(writer)); + self + } + + /// Will use the given `watcher` function to construct unprocessed [`AssetWatcher`] instances. + pub fn with_watcher( + mut self, + watcher: impl FnMut(crossbeam_channel::Sender) -> Option> + + Send + + Sync + + 'static, + ) -> Self { + self.watcher = Some(Box::new(watcher)); + self + } + + /// Will use the given `reader` function to construct processed [`AssetReader`] instances. + pub fn with_processed_reader( + mut self, + reader: impl FnMut() -> Box + Send + Sync + 'static, + ) -> Self { + self.processed_reader = Some(Box::new(reader)); + self + } + + /// Will use the given `writer` function to construct processed [`AssetWriter`] instances. + pub fn with_processed_writer( + mut self, + writer: impl FnMut() -> Option> + Send + Sync + 'static, + ) -> Self { + self.processed_writer = Some(Box::new(writer)); + self + } + + /// Will use the given `watcher` function to construct processed [`AssetWatcher`] instances. + pub fn with_processed_watcher( + mut self, + watcher: impl FnMut(crossbeam_channel::Sender) -> Option> + + Send + + Sync + + 'static, + ) -> Self { + self.processed_watcher = Some(Box::new(watcher)); + self + } + + /// Returns a builder containing the "platform default source" for the given `path` and `processed_path`. + /// For most platforms, this will use [`FileAssetReader`](crate::io::file::FileAssetReader) / [`FileAssetWriter`](crate::io::file::FileAssetWriter), + /// but some platforms (such as Android) have their own default readers / writers / watchers. + pub fn platform_default(path: &str, processed_path: Option<&str>) -> Self { + let default = Self::default() + .with_reader(AssetSource::get_default_reader(path.to_string())) + .with_writer(AssetSource::get_default_writer(path.to_string())) + .with_watcher(AssetSource::get_default_watcher( + path.to_string(), + Duration::from_millis(300), + )); + if let Some(processed_path) = processed_path { + default + .with_processed_reader(AssetSource::get_default_reader(processed_path.to_string())) + .with_processed_writer(AssetSource::get_default_writer(processed_path.to_string())) + .with_processed_watcher(AssetSource::get_default_watcher( + processed_path.to_string(), + Duration::from_millis(300), + )) + } else { + default + } + } +} + +/// A [`Resource`] that hold (repeatable) functions capable of producing new [`AssetReader`] and [`AssetWriter`] instances +/// for a given asset source. +#[derive(Resource, Default)] +pub struct AssetSourceBuilders { + sources: HashMap, AssetSourceBuilder>, + default: Option, +} + +impl AssetSourceBuilders { + /// Inserts a new builder with the given `id` + pub fn insert(&mut self, id: impl Into>, source: AssetSourceBuilder) { + match id.into() { + AssetSourceId::Default => { + self.default = Some(source); + } + AssetSourceId::Name(name) => { + self.sources.insert(name, source); + } + } + } + + /// Gets a mutable builder with the given `id`, if it exists. 
+ pub fn get_mut<'a, 'b>( + &'a mut self, + id: impl Into>, + ) -> Option<&'a mut AssetSourceBuilder> { + match id.into() { + AssetSourceId::Default => self.default.as_mut(), + AssetSourceId::Name(name) => self.sources.get_mut(&name.into_owned()), + } + } + + /// Builds a new [`AssetSources`] collection. If `watch` is true, the unprocessed sources will watch for changes. + /// If `watch_processed` is true, the processed sources will watch for changes. + pub fn build_sources(&mut self, watch: bool, watch_processed: bool) -> AssetSources { + let mut sources = HashMap::new(); + for (id, source) in &mut self.sources { + if let Some(data) = source.build( + AssetSourceId::Name(id.clone_owned()), + watch, + watch_processed, + ) { + sources.insert(id.clone_owned(), data); + } + } + + AssetSources { + sources, + default: self + .default + .as_mut() + .and_then(|p| p.build(AssetSourceId::Default, watch, watch_processed)) + .expect(MISSING_DEFAULT_SOURCE), + } + } + + /// Initializes the default [`AssetSourceBuilder`] if it has not already been set. + pub fn init_default_source(&mut self, path: &str, processed_path: Option<&str>) { + self.default + .get_or_insert_with(|| AssetSourceBuilder::platform_default(path, processed_path)); + } +} + +/// A collection of unprocessed and processed [`AssetReader`], [`AssetWriter`], and [`AssetWatcher`] instances +/// for a specific asset source, identified by an [`AssetSourceId`]. +pub struct AssetSource { + id: AssetSourceId<'static>, + reader: Box, + writer: Option>, + processed_reader: Option>, + processed_writer: Option>, + watcher: Option>, + processed_watcher: Option>, + event_receiver: Option>, + processed_event_receiver: Option>, +} + +impl AssetSource { + /// Starts building a new [`AssetSource`]. + pub fn build() -> AssetSourceBuilder { + AssetSourceBuilder::default() + } + + /// Returns this source's id. + #[inline] + pub fn id(&self) -> AssetSourceId<'static> { + self.id.clone() + } + + /// Return's this source's unprocessed [`AssetReader`]. + #[inline] + pub fn reader(&self) -> &dyn AssetReader { + &*self.reader + } + + /// Return's this source's unprocessed [`AssetWriter`], if it exists. + #[inline] + pub fn writer(&self) -> Result<&dyn AssetWriter, MissingAssetWriterError> { + self.writer + .as_deref() + .ok_or_else(|| MissingAssetWriterError(self.id.clone_owned())) + } + + /// Return's this source's processed [`AssetReader`], if it exists. + #[inline] + pub fn processed_reader(&self) -> Result<&dyn AssetReader, MissingProcessedAssetReaderError> { + self.processed_reader + .as_deref() + .ok_or_else(|| MissingProcessedAssetReaderError(self.id.clone_owned())) + } + + /// Return's this source's processed [`AssetWriter`], if it exists. + #[inline] + pub fn processed_writer(&self) -> Result<&dyn AssetWriter, MissingProcessedAssetWriterError> { + self.processed_writer + .as_deref() + .ok_or_else(|| MissingProcessedAssetWriterError(self.id.clone_owned())) + } + + /// Return's this source's unprocessed event receiver, if the source is currently watching for changes. + #[inline] + pub fn event_receiver(&self) -> Option<&crossbeam_channel::Receiver> { + self.event_receiver.as_ref() + } + + /// Return's this source's processed event receiver, if the source is currently watching for changes. + #[inline] + pub fn processed_event_receiver( + &self, + ) -> Option<&crossbeam_channel::Receiver> { + self.processed_event_receiver.as_ref() + } + + /// Returns true if the assets in this source should be processed. 
+ #[inline] + pub fn should_process(&self) -> bool { + self.processed_writer.is_some() + } + + /// Returns a builder function for this platform's default [`AssetReader`]. `path` is the relative path to + /// the asset root. + pub fn get_default_reader(_path: String) -> impl FnMut() -> Box + Send + Sync { + move || { + #[cfg(all(not(target_arch = "wasm32"), not(target_os = "android")))] + return Box::new(super::file::FileAssetReader::new(&_path)); + #[cfg(target_arch = "wasm32")] + return Box::new(super::wasm::HttpWasmAssetReader::new(&_path)); + #[cfg(target_os = "android")] + return Box::new(super::android::AndroidAssetReader); + } + } + + /// Returns a builder function for this platform's default [`AssetWriter`]. `path` is the relative path to + /// the asset root. This will return [`None`] if this platform does not support writing assets by default. + pub fn get_default_writer( + _path: String, + ) -> impl FnMut() -> Option> + Send + Sync { + move || { + #[cfg(all(not(target_arch = "wasm32"), not(target_os = "android")))] + return Some(Box::new(super::file::FileAssetWriter::new(&_path))); + #[cfg(any(target_arch = "wasm32", target_os = "android"))] + return None; + } + } + + /// Returns a builder function for this platform's default [`AssetWatcher`]. `path` is the relative path to + /// the asset root. This will return [`None`] if this platform does not support watching assets by default. + /// `file_debounce_time` is the amount of time to wait (and debounce duplicate events) before returning an event. + /// Higher durations reduce duplicates but increase the amount of time before a change event is processed. If the + /// duration is set too low, some systems might surface events _before_ their filesystem has the changes. + #[allow(unused)] + pub fn get_default_watcher( + path: String, + file_debounce_wait_time: Duration, + ) -> impl FnMut(crossbeam_channel::Sender) -> Option> + + Send + + Sync { + move |sender: crossbeam_channel::Sender| { + #[cfg(all( + feature = "file_watcher", + not(target_arch = "wasm32"), + not(target_os = "android") + ))] + return Some(Box::new( + super::file::FileWatcher::new( + std::path::PathBuf::from(path.clone()), + sender, + file_debounce_wait_time, + ) + .unwrap(), + )); + #[cfg(any( + not(feature = "file_watcher"), + target_arch = "wasm32", + target_os = "android" + ))] + return None; + } + } + + /// This will cause processed [`AssetReader`] futures (such as [`AssetReader::read`]) to wait until + /// the [`AssetProcessor`](crate::AssetProcessor) has finished processing the requested asset. + pub fn gate_on_processor(&mut self, processor_data: Arc) { + if let Some(reader) = self.processed_reader.take() { + self.processed_reader = Some(Box::new(ProcessorGatedReader::new( + self.id(), + reader, + processor_data, + ))); + } + } +} + +/// A collection of [`AssetSources`]. +pub struct AssetSources { + sources: HashMap, AssetSource>, + default: AssetSource, +} + +impl AssetSources { + /// Gets the [`AssetSource`] with the given `id`, if it exists. + pub fn get<'a, 'b>( + &'a self, + id: impl Into>, + ) -> Result<&'a AssetSource, MissingAssetSourceError> { + match id.into().into_owned() { + AssetSourceId::Default => Ok(&self.default), + AssetSourceId::Name(name) => self + .sources + .get(&name) + .ok_or_else(|| MissingAssetSourceError(AssetSourceId::Name(name))), + } + } + + /// Iterates all asset sources in the collection (including the default source). 
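A short sketch of driving `AssetSourceBuilder` by hand, the same way `AssetSourceBuilder::platform_default` does internally; the `"assets"` path is the usual default and the asserts only rely on behavior shown in this file:

```rust
use bevy_asset::io::{AssetSource, AssetSourceId};

fn main() {
    // Build a source from the platform-default reader and writer factories.
    let mut builder = AssetSource::build()
        .with_reader(AssetSource::get_default_reader("assets".to_string()))
        .with_writer(AssetSource::get_default_writer("assets".to_string()));

    // `watch` / `watch_processed` are false, so no watcher or event receiver is created.
    let source = builder
        .build(AssetSourceId::Default, false, false)
        .expect("a reader was configured, so build() returns Some");

    assert!(source.event_receiver().is_none()); // not watching
    assert!(!source.should_process()); // no processed writer configured
}
```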
+ pub fn iter(&self) -> impl Iterator { + self.sources.values().chain(Some(&self.default)) + } + + /// Mutably iterates all asset sources in the collection (including the default source). + pub fn iter_mut(&mut self) -> impl Iterator { + self.sources.values_mut().chain(Some(&mut self.default)) + } + + /// Iterates all processed asset sources in the collection (including the default source). + pub fn iter_processed(&self) -> impl Iterator { + self.iter().filter(|p| p.should_process()) + } + + /// Mutably iterates all processed asset sources in the collection (including the default source). + pub fn iter_processed_mut(&mut self) -> impl Iterator { + self.iter_mut().filter(|p| p.should_process()) + } + + /// Iterates over the [`AssetSourceId`] of every [`AssetSource`] in the collection (including the default source). + pub fn ids(&self) -> impl Iterator> + '_ { + self.sources + .keys() + .map(|k| AssetSourceId::Name(k.clone_owned())) + .chain(Some(AssetSourceId::Default)) + } + + /// This will cause processed [`AssetReader`] futures (such as [`AssetReader::read`]) to wait until + /// the [`AssetProcessor`](crate::AssetProcessor) has finished processing the requested asset. + pub fn gate_on_processor(&mut self, processor_data: Arc) { + for source in self.iter_processed_mut() { + source.gate_on_processor(processor_data.clone()); + } + } +} + +/// An error returned when an [`AssetSource`] does not exist for a given id. +#[derive(Error, Debug)] +#[error("Asset Source '{0}' does not exist")] +pub struct MissingAssetSourceError(AssetSourceId<'static>); + +/// An error returned when an [`AssetWriter`] does not exist for a given id. +#[derive(Error, Debug)] +#[error("Asset Source '{0}' does not have an AssetWriter.")] +pub struct MissingAssetWriterError(AssetSourceId<'static>); + +/// An error returned when a processed [`AssetReader`] does not exist for a given id. +#[derive(Error, Debug)] +#[error("Asset Source '{0}' does not have a processed AssetReader.")] +pub struct MissingProcessedAssetReaderError(AssetSourceId<'static>); + +/// An error returned when a processed [`AssetWriter`] does not exist for a given id. +#[derive(Error, Debug)] +#[error("Asset Source '{0}' does not have a processed AssetWriter.")] +pub struct MissingProcessedAssetWriterError(AssetSourceId<'static>); + +const MISSING_DEFAULT_SOURCE: &str = + "A default AssetSource is required. 
Add one to `AssetSourceBuilders`"; diff --git a/crates/bevy_asset/src/io/wasm.rs b/crates/bevy_asset/src/io/wasm.rs index f0acbae067d6c..99ff39799b087 100644 --- a/crates/bevy_asset/src/io/wasm.rs +++ b/crates/bevy_asset/src/io/wasm.rs @@ -1,6 +1,5 @@ use crate::io::{ - get_meta_path, AssetReader, AssetReaderError, AssetWatcher, EmptyPathStream, PathStream, - Reader, VecReader, + get_meta_path, AssetReader, AssetReaderError, EmptyPathStream, PathStream, Reader, VecReader, }; use bevy_log::error; use bevy_utils::BoxedFuture; @@ -99,11 +98,4 @@ impl AssetReader for HttpWasmAssetReader { error!("Reading directories is not supported with the HttpWasmAssetReader"); Box::pin(async move { Ok(false) }) } - - fn watch_for_changes( - &self, - _event_sender: crossbeam_channel::Sender, - ) -> Option> { - None - } } diff --git a/crates/bevy_asset/src/lib.rs b/crates/bevy_asset/src/lib.rs index 31bdc3a2e62ca..58a2a191108c4 100644 --- a/crates/bevy_asset/src/lib.rs +++ b/crates/bevy_asset/src/lib.rs @@ -8,7 +8,7 @@ pub mod saver; pub mod prelude { #[doc(hidden)] pub use crate::{ - Asset, AssetApp, AssetEvent, AssetId, AssetPlugin, AssetServer, Assets, Handle, + Asset, AssetApp, AssetEvent, AssetId, AssetMode, AssetPlugin, AssetServer, Assets, Handle, UntypedHandle, }; } @@ -38,153 +38,145 @@ pub use server::*; pub use bevy_utils::BoxedFuture; use crate::{ - io::{processor_gated::ProcessorGatedReader, AssetProvider, AssetProviders}, + io::{embedded::EmbeddedAssetRegistry, AssetSourceBuilder, AssetSourceBuilders, AssetSourceId}, processor::{AssetProcessor, Process}, }; -use bevy_app::{App, First, MainScheduleOrder, Plugin, PostUpdate, Startup}; +use bevy_app::{App, First, MainScheduleOrder, Plugin, PostUpdate}; use bevy_ecs::{ reflect::AppTypeRegistry, schedule::{IntoSystemConfigs, IntoSystemSetConfigs, ScheduleLabel, SystemSet}, world::FromWorld, }; +use bevy_log::error; use bevy_reflect::{FromReflect, GetTypeRegistration, Reflect, TypePath}; use std::{any::TypeId, sync::Arc}; -/// Provides "asset" loading and processing functionality. An [`Asset`] is a "runtime value" that is loaded from an [`AssetProvider`], +/// Provides "asset" loading and processing functionality. An [`Asset`] is a "runtime value" that is loaded from an [`AssetSource`], /// which can be something like a filesystem, a network, etc. /// -/// Supports flexible "modes", such as [`AssetPlugin::Processed`] and -/// [`AssetPlugin::Unprocessed`] that enable using the asset workflow that best suits your project. -pub enum AssetPlugin { - /// Loads assets without any "preprocessing" from the configured asset `source` (defaults to the `assets` folder). - Unprocessed { - source: AssetProvider, - watch_for_changes: bool, - }, - /// Loads "processed" assets from a given `destination` source (defaults to the `imported_assets/Default` folder). This should - /// generally only be used when distributing apps. Use [`AssetPlugin::ProcessedDev`] to develop apps that process assets, - /// then switch to [`AssetPlugin::Processed`] when deploying the apps. - Processed { - destination: AssetProvider, - watch_for_changes: bool, - }, - /// Starts an [`AssetProcessor`] in the background that reads assets from the `source` provider (defaults to the `assets` folder), - /// processes them according to their [`AssetMeta`], and writes them to the `destination` provider (defaults to the `imported_assets/Default` folder). 
+/// Supports flexible "modes", such as [`AssetMode::Processed`] and +/// [`AssetMode::Unprocessed`] that enable using the asset workflow that best suits your project. +/// +/// [`AssetSource`]: crate::io::AssetSource +pub struct AssetPlugin { + /// The default file path to use (relative to the project root) for unprocessed assets. + pub file_path: String, + /// The default file path to use (relative to the project root) for processed assets. + pub processed_file_path: String, + /// If set, will override the default "watch for changes" setting. By default "watch for changes" will be `false` unless + /// the `watch` cargo feature is set. `watch` can be enabled manually, or it will be automatically enabled if a specific watcher + /// like `file_watcher` is enabled. + /// + /// Most use cases should leave this set to [`None`] and enable a specific watcher feature such as `file_watcher` to enable + /// watching for dev-scenarios. + pub watch_for_changes_override: Option, + /// The [`AssetMode`] to use for this server. + pub mode: AssetMode, +} + +pub enum AssetMode { + /// Loads assets from their [`AssetSource`]'s default [`AssetReader`] without any "preprocessing". + /// + /// [`AssetReader`]: crate::io::AssetReader + /// [`AssetSource`]: crate::io::AssetSource + Unprocessed, + /// Assets will be "pre-processed". This enables assets to be imported / converted / optimized ahead of time. + /// + /// Assets will be read from their unprocessed [`AssetSource`] (defaults to the `assets` folder), + /// processed according to their [`AssetMeta`], and written to their processed [`AssetSource`] (defaults to the `imported_assets/Default` folder). /// - /// By default this will hot reload changes to the `source` provider, resulting in reprocessing the asset and reloading it in the [`App`]. + /// By default, this assumes the processor _has already been run_. It will load assets from their final processed [`AssetReader`]. + /// + /// When developing an app, you should enable the `asset_processor` cargo feature, which will run the asset processor at startup. This should generally + /// be used in combination with the `file_watcher` cargo feature, which enables hot-reloading of assets that have changed. When both features are enabled, + /// changes to "original/source assets" will be detected, the asset will be re-processed, and then the final processed asset will be hot-reloaded in the app. 
/// /// [`AssetMeta`]: crate::meta::AssetMeta - ProcessedDev { - source: AssetProvider, - destination: AssetProvider, - watch_for_changes: bool, - }, + /// [`AssetSource`]: crate::io::AssetSource + /// [`AssetReader`]: crate::io::AssetReader + Processed, } impl Default for AssetPlugin { fn default() -> Self { - Self::unprocessed() + Self { + mode: AssetMode::Unprocessed, + file_path: Self::DEFAULT_UNPROCESSED_FILE_PATH.to_string(), + processed_file_path: Self::DEFAULT_PROCESSED_FILE_PATH.to_string(), + watch_for_changes_override: None, + } } } impl AssetPlugin { - const DEFAULT_FILE_SOURCE: &'static str = "assets"; + const DEFAULT_UNPROCESSED_FILE_PATH: &'static str = "assets"; /// NOTE: this is in the Default sub-folder to make this forward compatible with "import profiles" /// and to allow us to put the "processor transaction log" at `imported_assets/log` - const DEFAULT_FILE_DESTINATION: &'static str = "imported_assets/Default"; - - /// Returns the default [`AssetPlugin::Processed`] configuration - pub fn processed() -> Self { - Self::Processed { - destination: Default::default(), - watch_for_changes: false, - } - } - - /// Returns the default [`AssetPlugin::ProcessedDev`] configuration - pub fn processed_dev() -> Self { - Self::ProcessedDev { - source: Default::default(), - destination: Default::default(), - watch_for_changes: true, - } - } - - /// Returns the default [`AssetPlugin::Unprocessed`] configuration - pub fn unprocessed() -> Self { - Self::Unprocessed { - source: Default::default(), - watch_for_changes: false, - } - } - - /// Enables watching for changes, which will hot-reload assets when they change. - pub fn watch_for_changes(mut self) -> Self { - match &mut self { - AssetPlugin::Unprocessed { - watch_for_changes, .. - } - | AssetPlugin::Processed { - watch_for_changes, .. - } - | AssetPlugin::ProcessedDev { - watch_for_changes, .. 
- } => *watch_for_changes = true, - }; - self - } + const DEFAULT_PROCESSED_FILE_PATH: &'static str = "imported_assets/Default"; } impl Plugin for AssetPlugin { fn build(&self, app: &mut App) { - app.init_schedule(UpdateAssets) - .init_schedule(AssetEvents) - .init_resource::(); + app.init_schedule(UpdateAssets).init_schedule(AssetEvents); + let embedded = EmbeddedAssetRegistry::default(); { - match self { - AssetPlugin::Unprocessed { - source, - watch_for_changes, - } => { - let source_reader = app - .world - .resource_mut::() - .get_source_reader(source); - app.insert_resource(AssetServer::new(source_reader, *watch_for_changes)); - } - AssetPlugin::Processed { - destination, - watch_for_changes, - } => { - let destination_reader = app - .world - .resource_mut::() - .get_destination_reader(destination); - app.insert_resource(AssetServer::new(destination_reader, *watch_for_changes)); + let mut sources = app + .world + .get_resource_or_insert_with::(Default::default); + sources.init_default_source( + &self.file_path, + (!matches!(self.mode, AssetMode::Unprocessed)) + .then_some(self.processed_file_path.as_str()), + ); + embedded.register_source(&mut sources); + } + { + let mut watch = cfg!(feature = "watch"); + if let Some(watch_override) = self.watch_for_changes_override { + watch = watch_override; + } + match self.mode { + AssetMode::Unprocessed => { + let mut builders = app.world.resource_mut::(); + let sources = builders.build_sources(watch, false); + app.insert_resource(AssetServer::new( + sources, + AssetServerMode::Unprocessed, + watch, + )); } - AssetPlugin::ProcessedDev { - source, - destination, - watch_for_changes, - } => { - let mut asset_providers = app.world.resource_mut::(); - let processor = AssetProcessor::new(&mut asset_providers, source, destination); - let destination_reader = asset_providers.get_destination_reader(source); - // the main asset server gates loads based on asset state - let gated_reader = - ProcessorGatedReader::new(destination_reader, processor.data.clone()); - // the main asset server shares loaders with the processor asset server - app.insert_resource(AssetServer::new_with_loaders( - Box::new(gated_reader), - processor.server().data.loaders.clone(), - *watch_for_changes, - )) - .insert_resource(processor) - .add_systems(Startup, AssetProcessor::start); + AssetMode::Processed => { + #[cfg(feature = "asset_processor")] + { + let mut builders = app.world.resource_mut::(); + let processor = AssetProcessor::new(&mut builders); + let mut sources = builders.build_sources(false, watch); + sources.gate_on_processor(processor.data.clone()); + // the main asset server shares loaders with the processor asset server + app.insert_resource(AssetServer::new_with_loaders( + sources, + processor.server().data.loaders.clone(), + AssetServerMode::Processed, + watch, + )) + .insert_resource(processor) + .add_systems(bevy_app::Startup, AssetProcessor::start); + } + #[cfg(not(feature = "asset_processor"))] + { + let mut builders = app.world.resource_mut::(); + let sources = builders.build_sources(false, watch); + app.insert_resource(AssetServer::new( + sources, + AssetServerMode::Processed, + watch, + )); + } } } } - app.init_asset::() + app.insert_resource(embedded) + .init_asset::() .init_asset::<()>() .configure_sets( UpdateAssets, @@ -254,6 +246,15 @@ pub trait AssetApp { fn register_asset_loader(&mut self, loader: L) -> &mut Self; /// Registers the given `processor` in the [`App`]'s [`AssetProcessor`]. 
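For app code, the mode selection above replaces the old enum-based `AssetPlugin`. A minimal sketch of configuring it through `DefaultPlugins`, assuming the usual `bevy` facade re-exports:

```rust
use bevy::prelude::*;

fn main() {
    App::new()
        .add_plugins(DefaultPlugins.set(AssetPlugin {
            // Load already-processed assets from `imported_assets/Default`.
            // Enable the `asset_processor` cargo feature to run the processor at
            // startup, and `file_watcher` to hot-reload changed source assets.
            mode: AssetMode::Processed,
            ..default()
        }))
        .run();
}
```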
fn register_asset_processor(&mut self, processor: P) -> &mut Self; + /// Registers the given [`AssetSourceBuilder`] with the given `id`. + /// + /// Note that asset sources must be registered before adding [`AssetPlugin`] to your application, + /// since registered asset sources are built at that point and not after. + fn register_asset_source( + &mut self, + id: impl Into>, + source: AssetSourceBuilder, + ) -> &mut Self; /// Sets the default asset processor for the given `extension`. fn set_default_asset_processor(&mut self, extension: &str) -> &mut Self; /// Initializes the given loader in the [`App`]'s [`AssetServer`]. @@ -350,6 +351,26 @@ impl AssetApp for App { } self } + + fn register_asset_source( + &mut self, + id: impl Into>, + source: AssetSourceBuilder, + ) -> &mut Self { + let id = id.into(); + if self.world.get_resource::().is_some() { + error!("{} must be registered before `AssetPlugin` (typically added as part of `DefaultPlugins`)", id); + } + + { + let mut sources = self + .world + .get_resource_or_insert_with(AssetSourceBuilders::default); + sources.insert(id, source); + } + + self + } } /// A system set that holds all "track asset" operations. @@ -366,55 +387,6 @@ pub struct UpdateAssets; #[derive(Debug, Hash, PartialEq, Eq, Clone, ScheduleLabel)] pub struct AssetEvents; -/// Loads an "internal" asset by embedding the string stored in the given `path_str` and associates it with the given handle. -#[macro_export] -macro_rules! load_internal_asset { - ($app: ident, $handle: expr, $path_str: expr, $loader: expr) => {{ - let mut assets = $app.world.resource_mut::<$crate::Assets<_>>(); - assets.insert($handle, ($loader)( - include_str!($path_str), - std::path::Path::new(file!()) - .parent() - .unwrap() - .join($path_str) - .to_string_lossy() - )); - }}; - // we can't support params without variadic arguments, so internal assets with additional params can't be hot-reloaded - ($app: ident, $handle: ident, $path_str: expr, $loader: expr $(, $param:expr)+) => {{ - let mut assets = $app.world.resource_mut::<$crate::Assets<_>>(); - assets.insert($handle, ($loader)( - include_str!($path_str), - std::path::Path::new(file!()) - .parent() - .unwrap() - .join($path_str) - .to_string_lossy(), - $($param),+ - )); - }}; -} - -/// Loads an "internal" binary asset by embedding the bytes stored in the given `path_str` and associates it with the given handle. -#[macro_export] -macro_rules! 
load_internal_binary_asset { - ($app: ident, $handle: expr, $path_str: expr, $loader: expr) => {{ - let mut assets = $app.world.resource_mut::<$crate::Assets<_>>(); - assets.insert( - $handle, - ($loader)( - include_bytes!($path_str).as_ref(), - std::path::Path::new(file!()) - .parent() - .unwrap() - .join($path_str) - .to_string_lossy() - .into(), - ), - ); - }}; -} - #[cfg(test)] mod tests { use crate::{ @@ -424,12 +396,11 @@ mod tests { io::{ gated::{GateOpener, GatedReader}, memory::{Dir, MemoryAssetReader}, - Reader, + AssetSource, AssetSourceId, Reader, }, loader::{AssetLoader, LoadContext}, - Asset, AssetApp, AssetEvent, AssetId, AssetPath, AssetPlugin, AssetProvider, - AssetProviders, AssetServer, Assets, DependencyLoadState, LoadState, - RecursiveDependencyLoadState, + Asset, AssetApp, AssetEvent, AssetId, AssetPath, AssetPlugin, AssetServer, Assets, + DependencyLoadState, LoadState, RecursiveDependencyLoadState, }; use bevy_app::{App, Update}; use bevy_core::TaskPoolPlugin; @@ -534,17 +505,14 @@ mod tests { fn test_app(dir: Dir) -> (App, GateOpener) { let mut app = App::new(); let (gated_memory_reader, gate_opener) = GatedReader::new(MemoryAssetReader { root: dir }); - app.insert_resource( - AssetProviders::default() - .with_reader("Test", move || Box::new(gated_memory_reader.clone())), + app.register_asset_source( + AssetSourceId::Default, + AssetSource::build().with_reader(move || Box::new(gated_memory_reader.clone())), ) .add_plugins(( TaskPoolPlugin::default(), LogPlugin::default(), - AssetPlugin::Unprocessed { - source: AssetProvider::Custom("Test".to_string()), - watch_for_changes: false, - }, + AssetPlugin::default(), )); (app, gate_opener) } diff --git a/crates/bevy_asset/src/loader.rs b/crates/bevy_asset/src/loader.rs index 04be5ce834508..94ab97593dd28 100644 --- a/crates/bevy_asset/src/loader.rs +++ b/crates/bevy_asset/src/loader.rs @@ -1,11 +1,12 @@ use crate::{ - io::{AssetReaderError, Reader}, + io::{AssetReaderError, MissingAssetSourceError, MissingProcessedAssetReaderError, Reader}, meta::{ loader_settings_meta_transform, AssetHash, AssetMeta, AssetMetaDyn, ProcessedInfoMinimal, Settings, }, path::AssetPath, - Asset, AssetLoadError, AssetServer, Assets, Handle, UntypedAssetId, UntypedHandle, + Asset, AssetLoadError, AssetServer, AssetServerMode, Assets, Handle, UntypedAssetId, + UntypedHandle, }; use bevy_ecs::world::World; use bevy_utils::{BoxedFuture, CowArc, HashMap, HashSet}; @@ -367,7 +368,7 @@ impl<'a> LoadContext<'a> { ) -> Handle { let label = label.into(); let loaded_asset: ErasedLoadedAsset = loaded_asset.into(); - let labeled_path = self.asset_path.with_label(label.clone()); + let labeled_path = self.asset_path.clone().with_label(label.clone()); let handle = self .asset_server .get_or_create_path_handle(labeled_path, None); @@ -385,7 +386,7 @@ impl<'a> LoadContext<'a> { /// /// See [`AssetPath`] for more on labeled assets. pub fn has_labeled_asset<'b>(&self, label: impl Into>) -> bool { - let path = self.asset_path.with_label(label.into()); + let path = self.asset_path.clone().with_label(label.into()); self.asset_server.get_handle_untyped(&path).is_some() } @@ -412,15 +413,21 @@ impl<'a> LoadContext<'a> { } /// Gets the source asset path for this load context. 
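Beyond the default source used in the tests above, the same hook registers named sources. A hedged sketch, assuming `Dir::default()` and a hypothetical `"memory"` source name; registration has to happen before `AssetPlugin` (usually before `DefaultPlugins`), as the error path added above points out:

```rust
use bevy::asset::io::{memory::{Dir, MemoryAssetReader}, AssetSource};
use bevy::prelude::*;
use std::path::Path;

fn main() {
    // Hypothetical in-memory content for a custom "memory" source.
    let dir = Dir::default();
    dir.insert_asset(Path::new("hello.txt"), &b"hello"[..]);

    App::new()
        // Register before `AssetPlugin` (part of `DefaultPlugins`); sources are
        // built when that plugin builds, and later registrations log an error.
        .register_asset_source(
            "memory",
            AssetSource::build()
                .with_reader(move || Box::new(MemoryAssetReader { root: dir.clone() })),
        )
        .add_plugins(DefaultPlugins)
        .run();
    // Assets in this source are addressed as e.g. "memory://hello.txt".
}
```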
- pub async fn read_asset_bytes<'b>( - &mut self, - path: &'b Path, + pub async fn read_asset_bytes<'b, 'c>( + &'b mut self, + path: impl Into>, ) -> Result, ReadAssetBytesError> { - let mut reader = self.asset_server.reader().read(path).await?; + let path = path.into(); + let source = self.asset_server.get_source(path.source())?; + let asset_reader = match self.asset_server.mode() { + AssetServerMode::Unprocessed { .. } => source.reader(), + AssetServerMode::Processed { .. } => source.processed_reader()?, + }; + let mut reader = asset_reader.read(path.path()).await?; let hash = if self.populate_hashes { // NOTE: ensure meta is read while the asset bytes reader is still active to ensure transactionality // See `ProcessorGatedReader` for more info - let meta_bytes = self.asset_server.reader().read_meta_bytes(path).await?; + let meta_bytes = asset_reader.read_meta_bytes(path.path()).await?; let minimal: ProcessedInfoMinimal = ron::de::from_bytes(&meta_bytes) .map_err(DeserializeMetaError::DeserializeMinimal)?; let processed_info = minimal @@ -432,8 +439,7 @@ impl<'a> LoadContext<'a> { }; let mut bytes = Vec::new(); reader.read_to_end(&mut bytes).await?; - self.loader_dependencies - .insert(AssetPath::from_path(path.to_owned()), hash); + self.loader_dependencies.insert(path.clone_owned(), hash); Ok(bytes) } @@ -480,7 +486,7 @@ impl<'a> LoadContext<'a> { &mut self, label: impl Into>, ) -> Handle { - let path = self.asset_path.with_label(label); + let path = self.asset_path.clone().with_label(label); let handle = self.asset_server.get_or_create_path_handle::(path, None); self.dependencies.insert(handle.id().untyped()); handle @@ -542,6 +548,10 @@ pub enum ReadAssetBytesError { DeserializeMetaError(#[from] DeserializeMetaError), #[error(transparent)] AssetReaderError(#[from] AssetReaderError), + #[error(transparent)] + MissingAssetSourceError(#[from] MissingAssetSourceError), + #[error(transparent)] + MissingProcessedAssetReaderError(#[from] MissingProcessedAssetReaderError), /// Encountered an I/O error while loading an asset. #[error("Encountered an io error while loading asset: {0}")] Io(#[from] std::io::Error), diff --git a/crates/bevy_asset/src/meta.rs b/crates/bevy_asset/src/meta.rs index e6d65b8ecd535..dbcd7d7feb57d 100644 --- a/crates/bevy_asset/src/meta.rs +++ b/crates/bevy_asset/src/meta.rs @@ -225,15 +225,14 @@ pub(crate) fn loader_settings_meta_transform( }) } -pub type AssetHash = [u8; 16]; +pub type AssetHash = [u8; 32]; /// NOTE: changing the hashing logic here is a _breaking change_ that requires a [`META_FORMAT_VERSION`] bump. pub(crate) fn get_asset_hash(meta_bytes: &[u8], asset_bytes: &[u8]) -> AssetHash { - let mut context = md5::Context::new(); - context.consume(meta_bytes); - context.consume(asset_bytes); - let digest = context.compute(); - digest.0 + let mut hasher = blake3::Hasher::new(); + hasher.update(meta_bytes); + hasher.update(asset_bytes); + *hasher.finalize().as_bytes() } /// NOTE: changing the hashing logic here is a _breaking change_ that requires a [`META_FORMAT_VERSION`] bump. 
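On the loader side, the `read_asset_bytes` signature change above means dependency reads accept any `Into<AssetPath>`, including source-qualified paths. A small sketch of a helper one might call from an `AssetLoader::load` implementation; the paths are hypothetical:

```rust
use bevy_asset::{LoadContext, ReadAssetBytesError};

// Hypothetical helper called from an `AssetLoader::load` implementation.
// Both plain and source-qualified dependency paths are accepted, and each read
// is recorded as a loader dependency under its full asset path.
async fn read_config_bytes(
    load_context: &mut LoadContext<'_>,
) -> Result<Vec<u8>, ReadAssetBytesError> {
    let _defaults = load_context.read_asset_bytes("config/defaults.ron").await?;
    load_context
        .read_asset_bytes("remote://config/overrides.ron")
        .await
}
```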
@@ -241,11 +240,10 @@ pub(crate) fn get_full_asset_hash( asset_hash: AssetHash, dependency_hashes: impl Iterator, ) -> AssetHash { - let mut context = md5::Context::new(); - context.consume(asset_hash); + let mut hasher = blake3::Hasher::new(); + hasher.update(&asset_hash); for hash in dependency_hashes { - context.consume(hash); + hasher.update(&hash); } - let digest = context.compute(); - digest.0 + *hasher.finalize().as_bytes() } diff --git a/crates/bevy_asset/src/path.rs b/crates/bevy_asset/src/path.rs index efd12041148e3..a6cf38db73848 100644 --- a/crates/bevy_asset/src/path.rs +++ b/crates/bevy_asset/src/path.rs @@ -1,3 +1,4 @@ +use crate::io::AssetSourceId; use bevy_reflect::{ std_traits::ReflectDefault, utility::NonGenericTypeInfoCell, FromReflect, FromType, GetTypeRegistration, Reflect, ReflectDeserialize, ReflectFromPtr, ReflectFromReflect, @@ -12,10 +13,13 @@ use std::{ ops::Deref, path::{Path, PathBuf}, }; +use thiserror::Error; /// Represents a path to an asset in a "virtual filesystem". /// -/// Asset paths consist of two main parts: +/// Asset paths consist of three main parts: +/// * [`AssetPath::source`]: The name of the [`AssetSource`](crate::io::AssetSource) to load the asset from. +/// This is optional. If one is not set the default source will be used (which is the `assets` folder by default). /// * [`AssetPath::path`]: The "virtual filesystem path" pointing to an asset source file. /// * [`AssetPath::label`]: An optional "named sub asset". When assets are loaded, they are /// allowed to load "sub assets" of any type, which are identified by a named "label". @@ -33,20 +37,24 @@ use std::{ /// # struct Scene; /// # /// # let asset_server: AssetServer = panic!(); -/// // This loads the `my_scene.scn` base asset. +/// // This loads the `my_scene.scn` base asset from the default asset source. /// let scene: Handle = asset_server.load("my_scene.scn"); /// -/// // This loads the `PlayerMesh` labeled asset from the `my_scene.scn` base asset. +/// // This loads the `PlayerMesh` labeled asset from the `my_scene.scn` base asset in the default asset source. /// let mesh: Handle = asset_server.load("my_scene.scn#PlayerMesh"); +/// +/// // This loads the `my_scene.scn` base asset from a custom 'remote' asset source. +/// let scene: Handle = asset_server.load("remote://my_scene.scn"); /// ``` /// /// [`AssetPath`] implements [`From`] for `&'static str`, `&'static Path`, and `&'a String`, /// which allows us to optimize the static cases. /// This means that the common case of `asset_server.load("my_scene.scn")` when it creates and /// clones internal owned [`AssetPaths`](AssetPath). -/// This also means that you should use [`AssetPath::new`] in cases where `&str` is the explicit type. +/// This also means that you should use [`AssetPath::parse`] in cases where `&str` is the explicit type. 
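The doc comment above describes the new three-part path format. A quick sketch of parsing a source-qualified, labeled path and reading the parts back; the `remote` source and file names are hypothetical:

```rust
use bevy_asset::AssetPath;
use std::path::Path;

fn main() {
    // A labeled sub-asset loaded from a custom "remote" source.
    let path = AssetPath::parse("remote://models/ship.gltf#Mesh0");
    assert_eq!(path.source().as_str(), Some("remote"));
    assert_eq!(path.path(), Path::new("models/ship.gltf"));
    assert_eq!(path.label(), Some("Mesh0"));

    // Without a `source://` prefix, the default source (the `assets` folder) is used.
    let plain = AssetPath::parse("models/ship.gltf");
    assert_eq!(plain.source().as_str(), None);
}
```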
#[derive(Eq, PartialEq, Hash, Clone, Default)] pub struct AssetPath<'a> { + source: AssetSourceId<'a>, path: CowArc<'a, Path>, label: Option>, } @@ -59,6 +67,9 @@ impl<'a> Debug for AssetPath<'a> { impl<'a> Display for AssetPath<'a> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + if let AssetSourceId::Name(name) = self.source() { + write!(f, "{name}://")?; + } write!(f, "{}", self.path.display())?; if let Some(label) = &self.label { write!(f, "#{label}")?; @@ -67,44 +78,140 @@ impl<'a> Display for AssetPath<'a> { } } +#[derive(Error, Debug, PartialEq, Eq)] +pub enum ParseAssetPathError { + #[error("Asset source must be followed by '://'")] + InvalidSourceSyntax, + #[error("Asset source must be at least one character. Either specify the source before the '://' or remove the `://`")] + MissingSource, + #[error("Asset label must be at least one character. Either specify the label after the '#' or remove the '#'")] + MissingLabel, +} + impl<'a> AssetPath<'a> { /// Creates a new [`AssetPath`] from a string in the asset path format: /// * An asset at the root: `"scene.gltf"` /// * An asset nested in some folders: `"some/path/scene.gltf"` /// * An asset with a "label": `"some/path/scene.gltf#Mesh0"` + /// * An asset with a custom "source": `"custom://some/path/scene.gltf#Mesh0"` /// /// Prefer [`From<'static str>`] for static strings, as this will prevent allocations /// and reference counting for [`AssetPath::into_owned`]. - pub fn new(asset_path: &'a str) -> AssetPath<'a> { - let (path, label) = Self::get_parts(asset_path); - Self { + /// + /// # Panics + /// Panics if the asset path is in an invalid format. Use [`AssetPath::try_parse`] for a fallible variant + pub fn parse(asset_path: &'a str) -> AssetPath<'a> { + Self::try_parse(asset_path).unwrap() + } + + /// Creates a new [`AssetPath`] from a string in the asset path format: + /// * An asset at the root: `"scene.gltf"` + /// * An asset nested in some folders: `"some/path/scene.gltf"` + /// * An asset with a "label": `"some/path/scene.gltf#Mesh0"` + /// * An asset with a custom "source": `"custom://some/path/scene.gltf#Mesh0"` + /// + /// Prefer [`From<'static str>`] for static strings, as this will prevent allocations + /// and reference counting for [`AssetPath::into_owned`]. + /// + /// This will return a [`ParseAssetPathError`] if `asset_path` is in an invalid format. 
+ pub fn try_parse(asset_path: &'a str) -> Result<AssetPath<'a>, ParseAssetPathError> { + let (source, path, label) = Self::parse_internal(asset_path)?; + Ok(Self { + source: match source { + Some(source) => AssetSourceId::Name(CowArc::Borrowed(source)), + None => AssetSourceId::Default, + }, path: CowArc::Borrowed(path), label: label.map(CowArc::Borrowed), - } + }) } - fn get_parts(asset_path: &str) -> (&Path, Option<&str>) { - let mut parts = asset_path.splitn(2, '#'); - let path = Path::new(parts.next().expect("Path must be set.")); - let label = parts.next(); - (path, label) + fn parse_internal( + asset_path: &str, + ) -> Result<(Option<&str>, &Path, Option<&str>), ParseAssetPathError> { + let mut chars = asset_path.char_indices(); + let mut source_range = None; + let mut path_range = 0..asset_path.len(); + let mut label_range = None; + while let Some((index, char)) = chars.next() { + match char { + ':' => { + let (_, char) = chars + .next() + .ok_or(ParseAssetPathError::InvalidSourceSyntax)?; + if char != '/' { + return Err(ParseAssetPathError::InvalidSourceSyntax); + } + let (index, char) = chars + .next() + .ok_or(ParseAssetPathError::InvalidSourceSyntax)?; + if char != '/' { + return Err(ParseAssetPathError::InvalidSourceSyntax); + } + source_range = Some(0..index - 2); + path_range.start = index + 1; + } + '#' => { + path_range.end = index; + label_range = Some(index + 1..asset_path.len()); + break; + } + _ => {} + } + } + + let source = match source_range { + Some(source_range) => { + if source_range.is_empty() { + return Err(ParseAssetPathError::MissingSource); + } + Some(&asset_path[source_range]) + } + None => None, + }; + let label = match label_range { + Some(label_range) => { + if label_range.is_empty() { + return Err(ParseAssetPathError::MissingLabel); + } + Some(&asset_path[label_range]) + } + None => None, + }; + + let path = Path::new(&asset_path[path_range]); + Ok((source, path, label)) } /// Creates a new [`AssetPath`] from a [`Path`]. #[inline] - pub fn from_path(path: impl Into<CowArc<'a, Path>>) -> AssetPath<'a> { + pub fn from_path(path: &'a Path) -> AssetPath<'a> { AssetPath { - path: path.into(), + path: CowArc::Borrowed(path), + source: AssetSourceId::Default, label: None, } } + /// Gets the "asset source", if one was defined. If none was defined, the default source + /// will be used. + #[inline] + pub fn source(&self) -> &AssetSourceId { + &self.source + } + /// Gets the "sub-asset label". #[inline] pub fn label(&self) -> Option<&str> { self.label.as_deref() } + /// Gets the "sub-asset label". + #[inline] + pub fn label_cow(&self) -> Option<CowArc<'a, str>> { + self.label.clone() + } + /// Gets the path to the asset in the "virtual filesystem". #[inline] pub fn path(&self) -> &Path { @@ -115,6 +222,7 @@ impl<'a> AssetPath<'a> { #[inline] pub fn without_label(&self) -> AssetPath<'_> { Self { + source: self.source.clone(), path: self.path.clone(), label: None, } @@ -135,24 +243,62 @@ impl<'a> AssetPath<'a> { /// Returns this asset path with the given label. This will replace the previous /// label if it exists. #[inline] - pub fn with_label(&self, label: impl Into<CowArc<'a, str>>) -> AssetPath<'a> { + pub fn with_label(self, label: impl Into<CowArc<'a, str>>) -> AssetPath<'a> { AssetPath { - path: self.path.clone(), + source: self.source, + path: self.path, label: Some(label.into()), } } + /// Returns this asset path with the given asset source. This will replace the previous asset + /// source if it exists.
+ #[inline] + pub fn with_source(self, source: impl Into>) -> AssetPath<'a> { + AssetPath { + source: source.into(), + path: self.path, + label: self.label, + } + } + + /// Returns an [`AssetPath`] for the parent folder of this path, if there is a parent folder in the path. + pub fn parent(&self) -> Option> { + let path = match &self.path { + CowArc::Borrowed(path) => CowArc::Borrowed(path.parent()?), + CowArc::Static(path) => CowArc::Static(path.parent()?), + CowArc::Owned(path) => path.parent()?.to_path_buf().into(), + }; + Some(AssetPath { + source: self.source.clone(), + label: None, + path, + }) + } + /// Converts this into an "owned" value. If internally a value is borrowed, it will be cloned into an "owned [`Arc`]". - /// If it is already an "owned [`Arc`]", it will remain unchanged. + /// If internally a value is a static reference, the static reference will be used unchanged. + /// If internally a value is an "owned [`Arc`]", it will remain unchanged. /// /// [`Arc`]: std::sync::Arc pub fn into_owned(self) -> AssetPath<'static> { AssetPath { + source: self.source.into_owned(), path: self.path.into_owned(), label: self.label.map(|l| l.into_owned()), } } + /// Clones this into an "owned" value. If internally a value is borrowed, it will be cloned into an "owned [`Arc`]". + /// If internally a value is a static reference, the static reference will be used unchanged. + /// If internally a value is an "owned [`Arc`]", the [`Arc`] will be cloned. + /// + /// [`Arc`]: std::sync::Arc + #[inline] + pub fn clone_owned(&self) -> AssetPath<'static> { + self.clone().into_owned() + } + /// Returns the full extension (including multiple '.' values). /// Ex: Returns `"config.ron"` for `"my_asset.config.ron"` pub fn get_full_extension(&self) -> Option { @@ -176,8 +322,9 @@ impl<'a> AssetPath<'a> { impl From<&'static str> for AssetPath<'static> { #[inline] fn from(asset_path: &'static str) -> Self { - let (path, label) = Self::get_parts(asset_path); + let (source, path, label) = Self::parse_internal(asset_path).unwrap(); AssetPath { + source: source.into(), path: CowArc::Static(path), label: label.map(CowArc::Static), } @@ -187,14 +334,14 @@ impl From<&'static str> for AssetPath<'static> { impl<'a> From<&'a String> for AssetPath<'a> { #[inline] fn from(asset_path: &'a String) -> Self { - AssetPath::new(asset_path.as_str()) + AssetPath::parse(asset_path.as_str()) } } impl From for AssetPath<'static> { #[inline] fn from(asset_path: String) -> Self { - AssetPath::new(asset_path.as_str()).into_owned() + AssetPath::parse(asset_path.as_str()).into_owned() } } @@ -202,6 +349,7 @@ impl From<&'static Path> for AssetPath<'static> { #[inline] fn from(path: &'static Path) -> Self { Self { + source: AssetSourceId::Default, path: CowArc::Static(path), label: None, } @@ -212,6 +360,7 @@ impl From for AssetPath<'static> { #[inline] fn from(path: PathBuf) -> Self { Self { + source: AssetSourceId::Default, path: path.into(), label: None, } @@ -261,7 +410,7 @@ impl<'de> Visitor<'de> for AssetPathVisitor { where E: serde::de::Error, { - Ok(AssetPath::new(v).into_owned()) + Ok(AssetPath::parse(v).into_owned()) } fn visit_string(self, v: String) -> Result @@ -402,3 +551,36 @@ impl FromReflect for AssetPath<'static> { >(::as_any(reflect))?)) } } + +#[cfg(test)] +mod tests { + use crate::AssetPath; + use std::path::Path; + + #[test] + fn parse_asset_path() { + let result = AssetPath::parse_internal("a/b.test"); + assert_eq!(result, Ok((None, Path::new("a/b.test"), None))); + + let result = 
AssetPath::parse_internal("http://a/b.test"); + assert_eq!(result, Ok((Some("http"), Path::new("a/b.test"), None))); + + let result = AssetPath::parse_internal("http://a/b.test#Foo"); + assert_eq!( + result, + Ok((Some("http"), Path::new("a/b.test"), Some("Foo"))) + ); + + let result = AssetPath::parse_internal("http://"); + assert_eq!(result, Ok((Some("http"), Path::new(""), None))); + + let result = AssetPath::parse_internal("://x"); + assert_eq!(result, Err(crate::ParseAssetPathError::MissingSource)); + + let result = AssetPath::parse_internal("a/b.test#"); + assert_eq!(result, Err(crate::ParseAssetPathError::MissingLabel)); + + let result = AssetPath::parse_internal("http:/"); + assert_eq!(result, Err(crate::ParseAssetPathError::InvalidSourceSyntax)); + } +} diff --git a/crates/bevy_asset/src/processor/log.rs b/crates/bevy_asset/src/processor/log.rs index 0c1c3d93fbade..642de9b127142 100644 --- a/crates/bevy_asset/src/processor/log.rs +++ b/crates/bevy_asset/src/processor/log.rs @@ -1,15 +1,16 @@ +use crate::AssetPath; use async_fs::File; use bevy_log::error; use bevy_utils::HashSet; use futures_lite::{AsyncReadExt, AsyncWriteExt}; -use std::path::{Path, PathBuf}; +use std::path::PathBuf; use thiserror::Error; /// An in-memory representation of a single [`ProcessorTransactionLog`] entry. #[derive(Debug)] pub(crate) enum LogEntry { - BeginProcessing(PathBuf), - EndProcessing(PathBuf), + BeginProcessing(AssetPath<'static>), + EndProcessing(AssetPath<'static>), UnrecoverableError, } @@ -55,12 +56,12 @@ pub enum ValidateLogError { /// An error that occurs when validating individual [`ProcessorTransactionLog`] entries. #[derive(Error, Debug)] pub enum LogEntryError { - #[error("Encountered a duplicate process asset transaction: {0:?}")] - DuplicateTransaction(PathBuf), - #[error("A transaction was ended that never started {0:?}")] - EndedMissingTransaction(PathBuf), - #[error("An asset started processing but never finished: {0:?}")] - UnfinishedTransaction(PathBuf), + #[error("Encountered a duplicate process asset transaction: {0}")] + DuplicateTransaction(AssetPath<'static>), + #[error("A transaction was ended that never started {0}")] + EndedMissingTransaction(AssetPath<'static>), + #[error("An asset started processing but never finished: {0}")] + UnfinishedTransaction(AssetPath<'static>), } const LOG_PATH: &str = "imported_assets/log"; @@ -114,9 +115,13 @@ impl ProcessorTransactionLog { file.read_to_string(&mut string).await?; for line in string.lines() { if let Some(path_str) = line.strip_prefix(ENTRY_BEGIN) { - log_lines.push(LogEntry::BeginProcessing(PathBuf::from(path_str))); + log_lines.push(LogEntry::BeginProcessing( + AssetPath::parse(path_str).into_owned(), + )); } else if let Some(path_str) = line.strip_prefix(ENTRY_END) { - log_lines.push(LogEntry::EndProcessing(PathBuf::from(path_str))); + log_lines.push(LogEntry::EndProcessing( + AssetPath::parse(path_str).into_owned(), + )); } else if line.is_empty() { continue; } else { @@ -127,7 +132,7 @@ impl ProcessorTransactionLog { } pub(crate) async fn validate() -> Result<(), ValidateLogError> { - let mut transactions: HashSet = Default::default(); + let mut transactions: HashSet> = Default::default(); let mut errors: Vec = Vec::new(); let entries = Self::read().await?; for entry in entries { @@ -160,21 +165,27 @@ impl ProcessorTransactionLog { /// Logs the start of an asset being processed. 
If this is not followed at some point in the log by a closing [`ProcessorTransactionLog::end_processing`], /// in the next run of the processor the asset processing will be considered "incomplete" and it will be reprocessed. - pub(crate) async fn begin_processing(&mut self, path: &Path) -> Result<(), WriteLogError> { - self.write(&format!("{ENTRY_BEGIN}{}\n", path.to_string_lossy())) + pub(crate) async fn begin_processing( + &mut self, + path: &AssetPath<'_>, + ) -> Result<(), WriteLogError> { + self.write(&format!("{ENTRY_BEGIN}{path}\n")) .await .map_err(|e| WriteLogError { - log_entry: LogEntry::BeginProcessing(path.to_owned()), + log_entry: LogEntry::BeginProcessing(path.clone_owned()), error: e, }) } /// Logs the end of an asset being successfully processed. See [`ProcessorTransactionLog::begin_processing`]. - pub(crate) async fn end_processing(&mut self, path: &Path) -> Result<(), WriteLogError> { - self.write(&format!("{ENTRY_END}{}\n", path.to_string_lossy())) + pub(crate) async fn end_processing( + &mut self, + path: &AssetPath<'_>, + ) -> Result<(), WriteLogError> { + self.write(&format!("{ENTRY_END}{path}\n")) .await .map_err(|e| WriteLogError { - log_entry: LogEntry::EndProcessing(path.to_owned()), + log_entry: LogEntry::EndProcessing(path.clone_owned()), error: e, }) } diff --git a/crates/bevy_asset/src/processor/mod.rs b/crates/bevy_asset/src/processor/mod.rs index 07803ee6ba932..4e5b2a878ab00 100644 --- a/crates/bevy_asset/src/processor/mod.rs +++ b/crates/bevy_asset/src/processor/mod.rs @@ -6,15 +6,15 @@ pub use process::*; use crate::{ io::{ - processor_gated::ProcessorGatedReader, AssetProvider, AssetProviders, AssetReader, - AssetReaderError, AssetSourceEvent, AssetWatcher, AssetWriter, AssetWriterError, + AssetReader, AssetReaderError, AssetSource, AssetSourceBuilders, AssetSourceEvent, + AssetSourceId, AssetSources, AssetWriter, AssetWriterError, MissingAssetSourceError, }, meta::{ get_asset_hash, get_full_asset_hash, AssetAction, AssetActionMinimal, AssetHash, AssetMeta, AssetMetaDyn, AssetMetaMinimal, ProcessedInfo, ProcessedInfoMinimal, }, - AssetLoadError, AssetPath, AssetServer, DeserializeMetaError, - MissingAssetLoaderForExtensionError, CANNOT_WATCH_ERROR_MESSAGE, + AssetLoadError, AssetPath, AssetServer, AssetServerMode, DeserializeMetaError, + MissingAssetLoaderForExtensionError, }; use bevy_ecs::prelude::*; use bevy_log::{debug, error, trace, warn}; @@ -30,10 +30,10 @@ use std::{ }; use thiserror::Error; -/// A "background" asset processor that reads asset values from a source [`AssetProvider`] (which corresponds to an [`AssetReader`] / [`AssetWriter`] pair), -/// processes them in some way, and writes them to a destination [`AssetProvider`]. +/// A "background" asset processor that reads asset values from a source [`AssetSource`] (which corresponds to an [`AssetReader`] / [`AssetWriter`] pair), +/// processes them in some way, and writes them to a destination [`AssetSource`]. /// -/// This will create .meta files (a human-editable serialized form of [`AssetMeta`]) in the source [`AssetProvider`] for assets that +/// This will create .meta files (a human-editable serialized form of [`AssetMeta`]) in the source [`AssetSource`] for assets /// that can be loaded and/or processed. This enables developers to configure how each asset should be loaded and/or processed. /// /// [`AssetProcessor`] can be run in the background while a Bevy App is running. Changes to assets will be automatically detected and hot-reloaded.
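To make the source/destination split described above concrete, here is a brief editorial sketch (not part of the patch): each processed `AssetSource` exposes an unprocessed reader/writer pair for user-authored files and a processed pair for the processor's output. The `copy_verbatim` helper and its error handling are hypothetical; `get_source`, `reader`, `processed_writer`, and `write_bytes` follow the call sites elsewhere in this diff, and module paths are assumed from its imports.

```rust
use bevy_asset::io::AssetSourceId;
use bevy_asset::processor::AssetProcessor;
use futures_lite::AsyncReadExt;
use std::path::Path;

// Hypothetical helper: read one asset's bytes from the unprocessed side of a
// source and write them verbatim to its processed ("destination") side.
async fn copy_verbatim(
    processor: &AssetProcessor,
    source_id: AssetSourceId<'static>,
    path: &Path,
) -> Result<(), Box<dyn std::error::Error>> {
    let source = processor.get_source(source_id)?;

    // Unprocessed side: where the user-authored file (and its .meta) lives.
    let mut reader = source.reader().read(path).await?;
    let mut bytes = Vec::new();
    reader.read_to_end(&mut bytes).await?;

    // Processed side: where the processor writes the final asset.
    source.processed_writer()?.write_bytes(path, &bytes).await?;
    Ok(())
}
```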
@@ -58,37 +58,21 @@ pub struct AssetProcessorData { /// Default processors for file extensions default_processors: RwLock>, state: async_lock::RwLock, - source_reader: Box, - source_writer: Box, - destination_reader: Box, - destination_writer: Box, + sources: AssetSources, initialized_sender: async_broadcast::Sender<()>, initialized_receiver: async_broadcast::Receiver<()>, finished_sender: async_broadcast::Sender<()>, finished_receiver: async_broadcast::Receiver<()>, - source_event_receiver: crossbeam_channel::Receiver, - _source_watcher: Option>, } impl AssetProcessor { /// Creates a new [`AssetProcessor`] instance. - pub fn new( - providers: &mut AssetProviders, - source: &AssetProvider, - destination: &AssetProvider, - ) -> Self { - let data = Arc::new(AssetProcessorData::new( - providers.get_source_reader(source), - providers.get_source_writer(source), - providers.get_destination_reader(destination), - providers.get_destination_writer(destination), - )); - let destination_reader = providers.get_destination_reader(destination); + pub fn new(source: &mut AssetSourceBuilders) -> Self { + let data = Arc::new(AssetProcessorData::new(source.build_sources(true, false))); // The asset processor uses its own asset server with its own id space - let server = AssetServer::new( - Box::new(ProcessorGatedReader::new(destination_reader, data.clone())), - true, - ); + let mut sources = source.build_sources(false, false); + sources.gate_on_processor(data.clone()); + let server = AssetServer::new(sources, AssetServerMode::Processed, false); Self { server, data } } @@ -114,24 +98,18 @@ impl AssetProcessor { *self.data.state.read().await } - /// Retrieves the "source" [`AssetReader`] (the place where user-provided unprocessed "asset sources" are stored) - pub fn source_reader(&self) -> &dyn AssetReader { - &*self.data.source_reader - } - - /// Retrieves the "source" [`AssetWriter`] (the place where user-provided unprocessed "asset sources" are stored) - pub fn source_writer(&self) -> &dyn AssetWriter { - &*self.data.source_writer - } - - /// Retrieves the "destination" [`AssetReader`] (the place where processed / [`AssetProcessor`]-managed assets are stored) - pub fn destination_reader(&self) -> &dyn AssetReader { - &*self.data.destination_reader + /// Retrieves the [`AssetSource`] for this processor + #[inline] + pub fn get_source<'a, 'b>( + &'a self, + id: impl Into>, + ) -> Result<&'a AssetSource, MissingAssetSourceError> { + self.data.sources.get(id.into()) } - /// Retrieves the "destination" [`AssetWriter`] (the place where processed / [`AssetProcessor`]-managed assets are stored) - pub fn destination_writer(&self) -> &dyn AssetWriter { - &*self.data.destination_writer + #[inline] + pub fn sources(&self) -> &AssetSources { + &self.data.sources } /// Logs an unrecoverable error. On the next run of the processor, all assets will be regenerated. This should only be used as a last resort. @@ -144,14 +122,14 @@ impl AssetProcessor { /// Logs the start of an asset being processed. If this is not followed at some point in the log by a closing [`AssetProcessor::log_end_processing`], /// in the next run of the processor the asset processing will be considered "incomplete" and it will be reprocessed. - async fn log_begin_processing(&self, path: &Path) { + async fn log_begin_processing(&self, path: &AssetPath<'_>) { let mut log = self.data.log.write().await; let log = log.as_mut().unwrap(); log.begin_processing(path).await.unwrap(); } /// Logs the end of an asset being successfully processed. 
See [`AssetProcessor::log_begin_processing`]. - async fn log_end_processing(&self, path: &Path) { + async fn log_end_processing(&self, path: &AssetPath<'_>) { let mut log = self.data.log.write().await; let log = log.as_mut().unwrap(); log.end_processing(path).await.unwrap(); @@ -172,10 +150,11 @@ impl AssetProcessor { } /// Processes all assets. This will: + /// * For each "processed [`AssetSource`]: /// * Scan the [`ProcessorTransactionLog`] and recover from any failures detected - /// * Scan the destination [`AssetProvider`] to build the current view of already processed assets. - /// * Scan the source [`AssetProvider`] and remove any processed "destination" assets that are invalid or no longer exist. - /// * For each asset in the `source` [`AssetProvider`], kick off a new "process job", which will process the asset + /// * Scan the processed [`AssetReader`] to build the current view of already processed assets. + /// * Scan the unprocessed [`AssetReader`] and remove any final processed assets that are invalid or no longer exist. + /// * For each asset in the unprocessed [`AssetReader`], kick off a new "process job", which will process the asset /// (if the latest version of the asset has not been processed). #[cfg(all(not(target_arch = "wasm32"), feature = "multi-threaded"))] pub fn process_assets(&self) { @@ -184,8 +163,11 @@ impl AssetProcessor { IoTaskPool::get().scope(|scope| { scope.spawn(async move { self.initialize().await.unwrap(); - let path = PathBuf::from(""); - self.process_assets_internal(scope, path).await.unwrap(); + for source in self.sources().iter_processed() { + self.process_assets_internal(scope, source, PathBuf::from("")) + .await + .unwrap(); + } }); }); // This must happen _after_ the scope resolves or it will happen "too early" @@ -195,20 +177,24 @@ impl AssetProcessor { debug!("Processing finished in {:?}", end_time - start_time); } - /// Listens for changes to assets in the source [`AssetProvider`] and update state accordingly. + /// Listens for changes to assets in the source [`AssetSource`] and update state accordingly. 
// PERF: parallelize change event processing pub async fn listen_for_source_change_events(&self) { debug!("Listening for changes to source assets"); loop { let mut started_processing = false; - for event in self.data.source_event_receiver.try_iter() { - if !started_processing { - self.set_state(ProcessorState::Processing).await; - started_processing = true; - } + for source in self.data.sources.iter_processed() { + if let Some(receiver) = source.event_receiver() { + for event in receiver.try_iter() { + if !started_processing { + self.set_state(ProcessorState::Processing).await; + started_processing = true; + } - self.handle_asset_source_event(event).await; + self.handle_asset_source_event(source, event).await; + } + } } if started_processing { @@ -217,84 +203,91 @@ impl AssetProcessor { } } - async fn handle_asset_source_event(&self, event: AssetSourceEvent) { + async fn handle_asset_source_event(&self, source: &AssetSource, event: AssetSourceEvent) { trace!("{event:?}"); match event { AssetSourceEvent::AddedAsset(path) | AssetSourceEvent::AddedMeta(path) | AssetSourceEvent::ModifiedAsset(path) | AssetSourceEvent::ModifiedMeta(path) => { - self.process_asset(&path).await; + self.process_asset(source, path).await; } AssetSourceEvent::RemovedAsset(path) => { - self.handle_removed_asset(path).await; + self.handle_removed_asset(source, path).await; } AssetSourceEvent::RemovedMeta(path) => { - self.handle_removed_meta(&path).await; + self.handle_removed_meta(source, path).await; } AssetSourceEvent::AddedFolder(path) => { - self.handle_added_folder(path).await; + self.handle_added_folder(source, path).await; } // NOTE: As a heads up for future devs: this event shouldn't be run in parallel with other events that might // touch this folder (ex: the folder might be re-created with new assets). Clean up the old state first. // Currently this event handler is not parallel, but it could be (and likely should be) in the future. AssetSourceEvent::RemovedFolder(path) => { - self.handle_removed_folder(&path).await; + self.handle_removed_folder(source, &path).await; } AssetSourceEvent::RenamedAsset { old, new } => { // If there was a rename event, but the path hasn't changed, this asset might need reprocessing. // Sometimes this event is returned when an asset is moved "back" into the asset folder if old == new { - self.process_asset(&new).await; + self.process_asset(source, new).await; } else { - self.handle_renamed_asset(old, new).await; + self.handle_renamed_asset(source, old, new).await; } } AssetSourceEvent::RenamedMeta { old, new } => { // If there was a rename event, but the path hasn't changed, this asset meta might need reprocessing. // Sometimes this event is returned when an asset meta is moved "back" into the asset folder if old == new { - self.process_asset(&new).await; + self.process_asset(source, new).await; } else { debug!("Meta renamed from {old:?} to {new:?}"); let mut infos = self.data.asset_infos.write().await; // Renaming meta should not assume that an asset has also been renamed. 
Check both old and new assets to see // if they should be re-imported (and/or have new meta generated) - infos.check_reprocess_queue.push_back(old); - infos.check_reprocess_queue.push_back(new); + let new_asset_path = AssetPath::from(new).with_source(source.id()); + let old_asset_path = AssetPath::from(old).with_source(source.id()); + infos.check_reprocess_queue.push_back(old_asset_path); + infos.check_reprocess_queue.push_back(new_asset_path); } } AssetSourceEvent::RenamedFolder { old, new } => { // If there was a rename event, but the path hasn't changed, this asset folder might need reprocessing. // Sometimes this event is returned when an asset meta is moved "back" into the asset folder if old == new { - self.handle_added_folder(new).await; + self.handle_added_folder(source, new).await; } else { // PERF: this reprocesses everything in the moved folder. this is not necessary in most cases, but // requires some nuance when it comes to path handling. - self.handle_removed_folder(&old).await; - self.handle_added_folder(new).await; + self.handle_removed_folder(source, &old).await; + self.handle_added_folder(source, new).await; } } AssetSourceEvent::RemovedUnknown { path, is_meta } => { - match self.destination_reader().is_directory(&path).await { + let processed_reader = source.processed_reader().unwrap(); + match processed_reader.is_directory(&path).await { Ok(is_directory) => { if is_directory { - self.handle_removed_folder(&path).await; + self.handle_removed_folder(source, &path).await; } else if is_meta { - self.handle_removed_meta(&path).await; + self.handle_removed_meta(source, path).await; } else { - self.handle_removed_asset(path).await; + self.handle_removed_asset(source, path).await; } } Err(err) => { - if let AssetReaderError::NotFound(_) = err { - // if the path is not found, a processed version does not exist - } else { - error!( - "Path '{path:?}' as removed, but the destination reader could not determine if it \ - was a folder or a file due to the following error: {err}" - ); + match err { + AssetReaderError::NotFound(_) => { + // if the path is not found, a processed version does not exist + } + AssetReaderError::Io(err) => { + error!( + "Path '{}' was removed, but the destination reader could not determine if it \ + was a folder or a file due to the following error: {err}", + AssetPath::from_path(&path).with_source(source.id()) + ); + } } } } @@ -302,38 +295,44 @@ impl AssetProcessor { } } - async fn handle_added_folder(&self, path: PathBuf) { - debug!("Folder {:?} was added. Attempting to re-process", path); + async fn handle_added_folder(&self, source: &AssetSource, path: PathBuf) { + debug!( + "Folder {} was added. Attempting to re-process", + AssetPath::from_path(&path).with_source(source.id()) + ); #[cfg(any(target_arch = "wasm32", not(feature = "multi-threaded")))] error!("AddFolder event cannot be handled in single threaded mode (or WASM) yet."); #[cfg(all(not(target_arch = "wasm32"), feature = "multi-threaded"))] IoTaskPool::get().scope(|scope| { scope.spawn(async move { - self.process_assets_internal(scope, path).await.unwrap(); + self.process_assets_internal(scope, source, path) + .await + .unwrap(); }); }); } /// Responds to a removed meta event by reprocessing the asset at the given path. - async fn handle_removed_meta(&self, path: &Path) { + async fn handle_removed_meta(&self, source: &AssetSource, path: PathBuf) { // If meta was removed, we might need to regenerate it. // Likewise, the user might be manually re-adding the asset. 
// Therefore, we shouldn't automatically delete the asset ... that is a // user-initiated action. debug!( "Meta for asset {:?} was removed. Attempting to re-process", - path + AssetPath::from_path(&path).with_source(source.id()) ); - self.process_asset(path).await; + self.process_asset(source, path).await; } /// Removes all processed assets stored at the given path (respecting transactionality), then removes the folder itself. - async fn handle_removed_folder(&self, path: &Path) { + async fn handle_removed_folder(&self, source: &AssetSource, path: &Path) { debug!("Removing folder {:?} because source was removed", path); - match self.destination_reader().read_directory(path).await { + let processed_reader = source.processed_reader().unwrap(); + match processed_reader.read_directory(path).await { Ok(mut path_stream) => { while let Some(child_path) = path_stream.next().await { - self.handle_removed_asset(child_path).await; + self.handle_removed_asset(source, child_path).await; } } Err(err) => match err { @@ -349,28 +348,32 @@ impl AssetProcessor { } }, } - if let Err(AssetWriterError::Io(err)) = - self.destination_writer().remove_directory(path).await - { - // we can ignore NotFound because if the "final" file in a folder was removed - // then we automatically clean up this folder - if err.kind() != ErrorKind::NotFound { - error!("Failed to remove destination folder that no longer exists in asset source {path:?}: {err}"); + let processed_writer = source.processed_writer().unwrap(); + if let Err(err) = processed_writer.remove_directory(path).await { + match err { + AssetWriterError::Io(err) => { + // we can ignore NotFound because if the "final" file in a folder was removed + // then we automatically clean up this folder + if err.kind() != ErrorKind::NotFound { + let asset_path = AssetPath::from_path(path).with_source(source.id()); + error!("Failed to remove destination folder that no longer exists in {asset_path}: {err}"); + } + } } } } /// Removes the processed version of an asset and associated in-memory metadata. This will block until all existing reads/writes to the /// asset have finished, thanks to the `file_transaction_lock`. - async fn handle_removed_asset(&self, path: PathBuf) { - debug!("Removing processed {:?} because source was removed", path); - let asset_path = AssetPath::from_path(path); + async fn handle_removed_asset(&self, source: &AssetSource, path: PathBuf) { + let asset_path = AssetPath::from(path).with_source(source.id()); + debug!("Removing processed {asset_path} because source was removed"); let mut infos = self.data.asset_infos.write().await; if let Some(info) = infos.get(&asset_path) { // we must wait for uncontested write access to the asset source to ensure existing readers / writers // can finish their operations let _write_lock = info.file_transaction_lock.write(); - self.remove_processed_asset_and_meta(asset_path.path()) + self.remove_processed_asset_and_meta(source, asset_path.path()) .await; } infos.remove(&asset_path).await; @@ -378,22 +381,25 @@ impl AssetProcessor { /// Handles a renamed source asset by moving it's processed results to the new location and updating in-memory paths + metadata. /// This will cause direct path dependencies to break. 
- async fn handle_renamed_asset(&self, old: PathBuf, new: PathBuf) { + async fn handle_renamed_asset(&self, source: &AssetSource, old: PathBuf, new: PathBuf) { let mut infos = self.data.asset_infos.write().await; - let old_asset_path = AssetPath::from_path(old); - if let Some(info) = infos.get(&old_asset_path) { + let old = AssetPath::from(old).with_source(source.id()); + let new = AssetPath::from(new).with_source(source.id()); + let processed_writer = source.processed_writer().unwrap(); + if let Some(info) = infos.get(&old) { // we must wait for uncontested write access to the asset source to ensure existing readers / writers // can finish their operations let _write_lock = info.file_transaction_lock.write(); - let old = old_asset_path.path(); - self.destination_writer().rename(old, &new).await.unwrap(); - self.destination_writer() - .rename_meta(old, &new) + processed_writer + .rename(old.path(), new.path()) + .await + .unwrap(); + processed_writer + .rename_meta(old.path(), new.path()) .await .unwrap(); } - let new_asset_path = AssetPath::from_path(new); - infos.rename(&old_asset_path, &new_asset_path).await; + infos.rename(&old, &new).await; } async fn finish_processing_assets(&self) { @@ -408,19 +414,20 @@ impl AssetProcessor { fn process_assets_internal<'scope>( &'scope self, scope: &'scope bevy_tasks::Scope<'scope, '_, ()>, + source: &'scope AssetSource, path: PathBuf, ) -> bevy_utils::BoxedFuture<'scope, Result<(), AssetReaderError>> { Box::pin(async move { - if self.source_reader().is_directory(&path).await? { - let mut path_stream = self.source_reader().read_directory(&path).await?; + if source.reader().is_directory(&path).await? { + let mut path_stream = source.reader().read_directory(&path).await?; while let Some(path) = path_stream.next().await { - self.process_assets_internal(scope, path).await?; + self.process_assets_internal(scope, source, path).await?; } } else { // Files without extensions are skipped let processor = self.clone(); scope.spawn(async move { - processor.process_asset(&path).await; + processor.process_asset(source, path).await; }); } Ok(()) @@ -434,8 +441,9 @@ impl AssetProcessor { IoTaskPool::get().scope(|scope| { for path in check_reprocess_queue.drain(..) { let processor = self.clone(); + let source = self.get_source(path.source()).unwrap(); scope.spawn(async move { - processor.process_asset(&path).await; + processor.process_asset(source, path.into()).await; }); } }); @@ -471,7 +479,7 @@ impl AssetProcessor { processors.get(processor_type_name).cloned() } - /// Populates the initial view of each asset by scanning the source and destination folders. + /// Populates the initial view of each asset by scanning the unprocessed and processed asset folders. /// This info will later be used to determine whether or not to re-process an asset /// /// This will validate transactions and recover failed transactions when necessary. 
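The initial scan described above is driven by a recursive `get_asset_paths` helper (called in the next hunk) that walks an `AssetReader`. A simplified editorial sketch of that walk is shown below; the real helper also threads an optional `AssetWriter` through so empty processed folders can be cleaned up, and `collect_paths` is a hypothetical name.

```rust
use bevy_asset::io::{AssetReader, AssetReaderError};
use bevy_utils::BoxedFuture;
use futures_lite::StreamExt;
use std::path::PathBuf;

// Recursively collect every file path under `path`, mirroring how the
// processor builds its initial view of unprocessed and processed assets.
fn collect_paths<'a>(
    reader: &'a dyn AssetReader,
    path: PathBuf,
    paths: &'a mut Vec<PathBuf>,
) -> BoxedFuture<'a, Result<(), AssetReaderError>> {
    Box::pin(async move {
        if reader.is_directory(&path).await? {
            let mut children = reader.read_directory(&path).await?;
            while let Some(child) = children.next().await {
                collect_paths(reader, child, paths).await?;
            }
        } else {
            paths.push(path);
        }
        Ok(())
    })
}
```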
@@ -512,68 +520,81 @@ impl AssetProcessor { }) } - let mut source_paths = Vec::new(); - let source_reader = self.source_reader(); - get_asset_paths(source_reader, None, PathBuf::from(""), &mut source_paths) + for source in self.sources().iter_processed() { + let Ok(processed_reader) = source.processed_reader() else { + continue; + }; + let Ok(processed_writer) = source.processed_writer() else { + continue; + }; + let mut unprocessed_paths = Vec::new(); + get_asset_paths( + source.reader(), + None, + PathBuf::from(""), + &mut unprocessed_paths, + ) .await .map_err(InitializeError::FailedToReadSourcePaths)?; - let mut destination_paths = Vec::new(); - let destination_reader = self.destination_reader(); - let destination_writer = self.destination_writer(); - get_asset_paths( - destination_reader, - Some(destination_writer), - PathBuf::from(""), - &mut destination_paths, - ) - .await - .map_err(InitializeError::FailedToReadDestinationPaths)?; - - for path in &source_paths { - asset_infos.get_or_insert(AssetPath::from_path(path.clone())); - } + let mut processed_paths = Vec::new(); + get_asset_paths( + processed_reader, + Some(processed_writer), + PathBuf::from(""), + &mut processed_paths, + ) + .await + .map_err(InitializeError::FailedToReadDestinationPaths)?; - for path in &destination_paths { - let asset_path = AssetPath::from_path(path.clone()); - let mut dependencies = Vec::new(); - if let Some(info) = asset_infos.get_mut(&asset_path) { - match self.destination_reader().read_meta_bytes(path).await { - Ok(meta_bytes) => { - match ron::de::from_bytes::(&meta_bytes) { - Ok(minimal) => { - trace!( - "Populated processed info for asset {path:?} {:?}", - minimal.processed_info - ); + for path in unprocessed_paths { + asset_infos.get_or_insert(AssetPath::from(path).with_source(source.id())); + } - if let Some(processed_info) = &minimal.processed_info { - for process_dependency_info in - &processed_info.process_dependencies - { - dependencies.push(process_dependency_info.path.clone()); + for path in processed_paths { + let mut dependencies = Vec::new(); + let asset_path = AssetPath::from(path).with_source(source.id()); + if let Some(info) = asset_infos.get_mut(&asset_path) { + match processed_reader.read_meta_bytes(asset_path.path()).await { + Ok(meta_bytes) => { + match ron::de::from_bytes::(&meta_bytes) { + Ok(minimal) => { + trace!( + "Populated processed info for asset {asset_path} {:?}", + minimal.processed_info + ); + + if let Some(processed_info) = &minimal.processed_info { + for process_dependency_info in + &processed_info.process_dependencies + { + dependencies.push(process_dependency_info.path.clone()); + } } + info.processed_info = minimal.processed_info; + } + Err(err) => { + trace!("Removing processed data for {asset_path} because meta could not be parsed: {err}"); + self.remove_processed_asset_and_meta(source, asset_path.path()) + .await; } - info.processed_info = minimal.processed_info; - } - Err(err) => { - trace!("Removing processed data for {path:?} because meta could not be parsed: {err}"); - self.remove_processed_asset_and_meta(path).await; } } + Err(err) => { + trace!("Removing processed data for {asset_path} because meta failed to load: {err}"); + self.remove_processed_asset_and_meta(source, asset_path.path()) + .await; + } } - Err(err) => { - trace!("Removing processed data for {path:?} because meta failed to load: {err}"); - self.remove_processed_asset_and_meta(path).await; - } + } else { + trace!("Removing processed data for non-existent asset {asset_path}"); + 
self.remove_processed_asset_and_meta(source, asset_path.path()) + .await; } - } else { - trace!("Removing processed data for non-existent asset {path:?}"); - self.remove_processed_asset_and_meta(path).await; - } - for dependency in dependencies { - asset_infos.add_dependant(&dependency, asset_path.clone()); + for dependency in dependencies { + asset_infos.add_dependant(&dependency, asset_path.clone()); + } } } @@ -584,19 +605,20 @@ impl AssetProcessor { /// Removes the processed version of an asset and its metadata, if it exists. This _is not_ transactional like `remove_processed_asset_transactional`, nor /// does it remove existing in-memory metadata. - async fn remove_processed_asset_and_meta(&self, path: &Path) { - if let Err(err) = self.destination_writer().remove(path).await { + async fn remove_processed_asset_and_meta(&self, source: &AssetSource, path: &Path) { + if let Err(err) = source.processed_writer().unwrap().remove(path).await { warn!("Failed to remove non-existent asset {path:?}: {err}"); } - if let Err(err) = self.destination_writer().remove_meta(path).await { + if let Err(err) = source.processed_writer().unwrap().remove_meta(path).await { warn!("Failed to remove non-existent meta {path:?}: {err}"); } - self.clean_empty_processed_ancestor_folders(path).await; + self.clean_empty_processed_ancestor_folders(source, path) + .await; } - async fn clean_empty_processed_ancestor_folders(&self, path: &Path) { + async fn clean_empty_processed_ancestor_folders(&self, source: &AssetSource, path: &Path) { // As a safety precaution don't delete absolute paths to avoid deleting folders outside of the destination folder if path.is_absolute() { error!("Attempted to clean up ancestor folders of an absolute path. This is unsafe so the operation was skipped."); @@ -606,8 +628,9 @@ impl AssetProcessor { if parent == Path::new("") { break; } - if self - .destination_writer() + if source + .processed_writer() + .unwrap() .remove_empty_directory(parent) .await .is_err() @@ -624,33 +647,39 @@ impl AssetProcessor { /// to block reads until the asset is processed). /// /// [`LoadContext`]: crate::loader::LoadContext - async fn process_asset(&self, path: &Path) { - let result = self.process_asset_internal(path).await; + /// [`ProcessorGatedReader`]: crate::io::processor_gated::ProcessorGatedReader + async fn process_asset(&self, source: &AssetSource, path: PathBuf) { + let asset_path = AssetPath::from(path).with_source(source.id()); + let result = self.process_asset_internal(source, &asset_path).await; let mut infos = self.data.asset_infos.write().await; - let asset_path = AssetPath::from_path(path.to_owned()); infos.finish_processing(asset_path, result).await; } - async fn process_asset_internal(&self, path: &Path) -> Result<ProcessResult, ProcessError> { - if path.extension().is_none() { - return Err(ProcessError::ExtensionRequired); - } - let asset_path = AssetPath::from_path(path.to_path_buf()); + async fn process_asset_internal( + &self, + source: &AssetSource, + asset_path: &AssetPath<'static>, + ) -> Result<ProcessResult, ProcessError> { + // TODO: The extension check was removed now that AssetPath is the input. Is that OK?
// TODO: check if already processing to protect against duplicate hot-reload events - debug!("Processing {:?}", path); + debug!("Processing {:?}", asset_path); let server = &self.server; + let path = asset_path.path(); + let reader = source.reader(); + + let reader_err = |err| ProcessError::AssetReaderError { + path: asset_path.clone(), + err, + }; + let writer_err = |err| ProcessError::AssetWriterError { + path: asset_path.clone(), + err, + }; // Note: we get the asset source reader first because we don't want to create meta files for assets that don't have source files - let mut reader = self.source_reader().read(path).await.map_err(|e| match e { - AssetReaderError::NotFound(_) => ProcessError::MissingAssetSource(path.to_owned()), - AssetReaderError::Io(err) => ProcessError::AssetSourceIoError(err), - })?; - - let (mut source_meta, meta_bytes, processor) = match self - .source_reader() - .read_meta_bytes(path) - .await - { + let mut byte_reader = reader.read(path).await.map_err(reader_err)?; + + let (mut source_meta, meta_bytes, processor) = match reader.read_meta_bytes(path).await { Ok(meta_bytes) => { let minimal: AssetMetaMinimal = ron::de::from_bytes(&meta_bytes).map_err(|e| { ProcessError::DeserializeMetaError(DeserializeMetaError::DeserializeMinimal(e)) @@ -684,7 +713,7 @@ impl AssetProcessor { let meta = processor.default_meta(); (meta, Some(processor)) } else { - match server.get_path_asset_loader(&asset_path).await { + match server.get_path_asset_loader(asset_path.clone()).await { Ok(loader) => (loader.default_meta(), None), Err(MissingAssetLoaderForExtensionError { .. }) => { let meta: Box = @@ -695,19 +724,31 @@ impl AssetProcessor { }; let meta_bytes = meta.serialize(); // write meta to source location if it doesn't already exist - self.source_writer() + source + .writer()? .write_meta_bytes(path, &meta_bytes) - .await?; + .await + .map_err(writer_err)?; (meta, meta_bytes, processor) } - Err(err) => return Err(ProcessError::ReadAssetMetaError(err)), + Err(err) => { + return Err(ProcessError::ReadAssetMetaError { + path: asset_path.clone(), + err, + }) + } }; + let processed_writer = source.processed_writer()?; + let mut asset_bytes = Vec::new(); - reader + byte_reader .read_to_end(&mut asset_bytes) .await - .map_err(ProcessError::AssetSourceIoError)?; + .map_err(|e| ProcessError::AssetReaderError { + path: asset_path.clone(), + err: AssetReaderError::Io(e), + })?; // PERF: in theory these hashes could be streamed if we want to avoid allocating the whole asset. // The downside is that reading assets would need to happen twice (once for the hash and once for the asset loader) @@ -722,7 +763,7 @@ impl AssetProcessor { { let infos = self.data.asset_infos.read().await; if let Some(current_processed_info) = infos - .get(&asset_path) + .get(asset_path) .and_then(|i| i.processed_info.as_ref()) { if current_processed_info.hash == new_hash { @@ -754,18 +795,24 @@ impl AssetProcessor { // NOTE: if processing the asset fails this will produce an "unfinished" log entry, forcing a rebuild on next run. 
// Directly writing to the asset destination in the processor necessitates this behavior // TODO: this class of failure can be recovered via re-processing + smarter log validation that allows for duplicate transactions in the event of failures - self.log_begin_processing(path).await; + self.log_begin_processing(asset_path).await; if let Some(processor) = processor { - let mut writer = self.destination_writer().write(path).await?; + let mut writer = processed_writer.write(path).await.map_err(writer_err)?; let mut processed_meta = { let mut context = - ProcessContext::new(self, &asset_path, &asset_bytes, &mut new_processed_info); + ProcessContext::new(self, asset_path, &asset_bytes, &mut new_processed_info); processor .process(&mut context, source_meta, &mut *writer) .await? }; - writer.flush().await.map_err(AssetWriterError::Io)?; + writer + .flush() + .await + .map_err(|e| ProcessError::AssetWriterError { + path: asset_path.clone(), + err: AssetWriterError::Io(e), + })?; let full_hash = get_full_asset_hash( new_hash, @@ -777,20 +824,23 @@ impl AssetProcessor { new_processed_info.full_hash = full_hash; *processed_meta.processed_info_mut() = Some(new_processed_info.clone()); let meta_bytes = processed_meta.serialize(); - self.destination_writer() + processed_writer .write_meta_bytes(path, &meta_bytes) - .await?; + .await + .map_err(writer_err)?; } else { - self.destination_writer() + processed_writer .write_bytes(path, &asset_bytes) - .await?; + .await + .map_err(writer_err)?; *source_meta.processed_info_mut() = Some(new_processed_info.clone()); let meta_bytes = source_meta.serialize(); - self.destination_writer() + processed_writer .write_meta_bytes(path, &meta_bytes) - .await?; + .await + .map_err(writer_err)?; } - self.log_end_processing(path).await; + self.log_end_processing(asset_path).await; Ok(ProcessResult::Processed(new_processed_info)) } @@ -818,27 +868,35 @@ impl AssetProcessor { } LogEntryError::UnfinishedTransaction(path) => { debug!("Asset {path:?} did not finish processing. Clearing state for that asset"); - if let Err(err) = self.destination_writer().remove(&path).await { + let mut unrecoverable_err = |message: &dyn std::fmt::Display| { + error!("Failed to remove asset {path:?}: {message}"); + state_is_valid = false; + }; + let Ok(source) = self.get_source(path.source()) else { + (unrecoverable_err)(&"AssetSource does not exist"); + continue; + }; + let Ok(processed_writer) = source.processed_writer() else { + (unrecoverable_err)(&"AssetSource does not have a processed AssetWriter registered"); + continue; + }; + + if let Err(err) = processed_writer.remove(path.path()).await { match err { AssetWriterError::Io(err) => { // any error but NotFound means we could be in a bad state if err.kind() != ErrorKind::NotFound { - error!("Failed to remove asset {path:?}: {err}"); - state_is_valid = false; + (unrecoverable_err)(&err); } } } } - if let Err(err) = self.destination_writer().remove_meta(&path).await - { + if let Err(err) = processed_writer.remove_meta(path.path()).await { match err { AssetWriterError::Io(err) => { // any error but NotFound means we could be in a bad state if err.kind() != ErrorKind::NotFound { - error!( - "Failed to remove asset meta {path:?}: {err}" - ); - state_is_valid = false; + (unrecoverable_err)(&err); } } } @@ -852,12 +910,16 @@ impl AssetProcessor { if !state_is_valid { error!("Processed asset transaction log state was invalid and unrecoverable for some reason (see previous logs). 
Removing processed assets and starting fresh."); - if let Err(err) = self - .destination_writer() - .remove_assets_in_directory(Path::new("")) - .await - { - panic!("Processed assets were in a bad state. To correct this, the asset processor attempted to remove all processed assets and start from scratch. This failed. There is no way to continue. Try restarting, or deleting imported asset folder manually. {err}"); + for source in self.sources().iter_processed() { + let Ok(processed_writer) = source.processed_writer() else { + continue; + }; + if let Err(err) = processed_writer + .remove_assets_in_directory(Path::new("")) + .await + { + panic!("Processed assets were in a bad state. To correct this, the asset processor attempted to remove all processed assets and start from scratch. This failed. There is no way to continue. Try restarting, or deleting imported asset folder manually. {err}"); + } } } } @@ -870,35 +932,20 @@ impl AssetProcessor { } impl AssetProcessorData { - pub fn new( - source_reader: Box, - source_writer: Box, - destination_reader: Box, - destination_writer: Box, - ) -> Self { + pub fn new(source: AssetSources) -> Self { let (mut finished_sender, finished_receiver) = async_broadcast::broadcast(1); let (mut initialized_sender, initialized_receiver) = async_broadcast::broadcast(1); // allow overflow on these "one slot" channels to allow receivers to retrieve the "latest" state, and to allow senders to // not block if there was older state present. finished_sender.set_overflow(true); initialized_sender.set_overflow(true); - let (source_event_sender, source_event_receiver) = crossbeam_channel::unbounded(); - // TODO: watching for changes could probably be entirely optional / we could just warn here - let source_watcher = source_reader.watch_for_changes(source_event_sender); - if source_watcher.is_none() { - error!("{}", CANNOT_WATCH_ERROR_MESSAGE); - } + AssetProcessorData { - source_reader, - source_writer, - destination_reader, - destination_writer, + sources: source, finished_sender, finished_receiver, initialized_sender, initialized_receiver, - source_event_receiver, - _source_watcher: source_watcher, state: async_lock::RwLock::new(ProcessorState::Initializing), log: Default::default(), processors: Default::default(), @@ -908,11 +955,11 @@ impl AssetProcessorData { } /// Returns a future that will not finish until the path has been processed. - pub async fn wait_until_processed(&self, path: &Path) -> ProcessStatus { + pub async fn wait_until_processed(&self, path: AssetPath<'static>) -> ProcessStatus { self.wait_until_initialized().await; let mut receiver = { let infos = self.asset_infos.write().await; - let info = infos.get(&AssetPath::from_path(path.to_path_buf())); + let info = infos.get(&path); match info { Some(info) => match info.status { Some(result) => return result, @@ -1038,7 +1085,7 @@ pub struct ProcessorAssetInfos { /// Therefore this _must_ always be consistent with the `infos` data. If a new asset is added to `infos`, it should /// check this maps for dependencies and add them. If an asset is removed, it should update the dependants here. 
non_existent_dependants: HashMap, HashSet>>, - check_reprocess_queue: VecDeque, + check_reprocess_queue: VecDeque>, } impl ProcessorAssetInfos { @@ -1100,7 +1147,7 @@ impl ProcessorAssetInfos { info.update_status(ProcessStatus::Processed).await; let dependants = info.dependants.iter().cloned().collect::>(); for path in dependants { - self.check_reprocess_queue.push_back(path.path().to_owned()); + self.check_reprocess_queue.push_back(path); } } Ok(ProcessResult::SkippedNotChanged) => { @@ -1118,20 +1165,21 @@ impl ProcessorAssetInfos { // Skip assets without extensions } Err(ProcessError::MissingAssetLoaderForExtension(_)) => { - trace!("No loader found for {:?}", asset_path); + trace!("No loader found for {asset_path}"); } - Err(ProcessError::MissingAssetSource(_)) => { + Err(ProcessError::AssetReaderError { + err: AssetReaderError::NotFound(_), + .. + }) => { // if there is no asset source, no processing can be done - trace!( - "No need to process asset {:?} because it does not exist", - asset_path - ); + trace!("No need to process asset {asset_path} because it does not exist"); } Err(err) => { - error!("Failed to process asset {:?}: {:?}", asset_path, err); + error!("Failed to process asset {asset_path}: {err}"); // if this failed because a dependency could not be loaded, make sure it is reprocessed if that dependency is reprocessed - if let ProcessError::AssetLoadError(AssetLoadError::CannotLoadDependency { + if let ProcessError::AssetLoadError(AssetLoadError::AssetLoaderError { path: dependency, + .. }) = err { let info = self.get_mut(&asset_path).expect("info should exist"); @@ -1220,10 +1268,10 @@ impl ProcessorAssetInfos { new_info.dependants.iter().cloned().collect() }; // Queue the asset for a reprocess check, in case it needs new meta. - self.check_reprocess_queue.push_back(new.path().to_owned()); + self.check_reprocess_queue.push_back(new.clone()); for dependant in dependants { // Queue dependants for reprocessing because they might have been waiting for this asset. - self.check_reprocess_queue.push_back(dependant.into()); + self.check_reprocess_queue.push_back(dependant); } } } diff --git a/crates/bevy_asset/src/processor/process.rs b/crates/bevy_asset/src/processor/process.rs index 0d0d3f468e2fb..ef6a3fbb2f5c5 100644 --- a/crates/bevy_asset/src/processor/process.rs +++ b/crates/bevy_asset/src/processor/process.rs @@ -1,5 +1,8 @@ use crate::{ - io::{AssetReaderError, AssetWriterError, Writer}, + io::{ + AssetReaderError, AssetWriterError, MissingAssetWriterError, + MissingProcessedAssetReaderError, MissingProcessedAssetWriterError, Writer, + }, meta::{AssetAction, AssetMeta, AssetMetaDyn, ProcessDependencyInfo, ProcessedInfo, Settings}, processor::AssetProcessor, saver::{AssetSaver, SavedAsset}, @@ -8,7 +11,7 @@ use crate::{ }; use bevy_utils::BoxedFuture; use serde::{Deserialize, Serialize}; -use std::{marker::PhantomData, path::PathBuf}; +use std::marker::PhantomData; use thiserror::Error; /// Asset "processor" logic that reads input asset bytes (stored on [`ProcessContext`]), processes the value in some way, @@ -70,20 +73,33 @@ pub struct LoadAndSaveSettings { /// An error that is encountered during [`Process::process`]. 
#[derive(Error, Debug)] pub enum ProcessError { - #[error("The asset source file for '{0}' does not exist")] - MissingAssetSource(PathBuf), - #[error(transparent)] - AssetSourceIoError(std::io::Error), #[error(transparent)] MissingAssetLoaderForExtension(#[from] MissingAssetLoaderForExtensionError), #[error(transparent)] MissingAssetLoaderForTypeName(#[from] MissingAssetLoaderForTypeNameError), #[error("The processor '{0}' does not exist")] MissingProcessor(String), + #[error("Encountered an AssetReader error for '{path}': {err}")] + AssetReaderError { + path: AssetPath<'static>, + err: AssetReaderError, + }, + #[error("Encountered an AssetWriter error for '{path}': {err}")] + AssetWriterError { + path: AssetPath<'static>, + err: AssetWriterError, + }, + #[error(transparent)] + MissingAssetWriterError(#[from] MissingAssetWriterError), + #[error(transparent)] + MissingProcessedAssetReaderError(#[from] MissingProcessedAssetReaderError), #[error(transparent)] - AssetWriterError(#[from] AssetWriterError), - #[error("Failed to read asset metadata {0:?}")] - ReadAssetMetaError(AssetReaderError), + MissingProcessedAssetWriterError(#[from] MissingProcessedAssetWriterError), + #[error("Failed to read asset metadata for {path}: {err}")] + ReadAssetMetaError { + path: AssetPath<'static>, + err: AssetReaderError, + }, #[error(transparent)] DeserializeMetaError(#[from] DeserializeMetaError), #[error(transparent)] diff --git a/crates/bevy_asset/src/server/info.rs b/crates/bevy_asset/src/server/info.rs index baa4007001e95..f41057f6221e9 100644 --- a/crates/bevy_asset/src/server/info.rs +++ b/crates/bevy_asset/src/server/info.rs @@ -69,6 +69,9 @@ pub(crate) struct AssetInfos { /// Tracks assets that depend on the "key" asset path inside their asset loaders ("loader dependencies") /// This should only be set when watching for changes to avoid unnecessary work. pub(crate) loader_dependants: HashMap, HashSet>>, + /// Tracks living labeled assets for a given source asset. + /// This should only be set when watching for changes to avoid unnecessary work. 
+ pub(crate) living_labeled_assets: HashMap, HashSet>, pub(crate) handle_providers: HashMap, pub(crate) dependency_loaded_event_sender: HashMap, } @@ -88,6 +91,8 @@ impl AssetInfos { Self::create_handle_internal( &mut self.infos, &self.handle_providers, + &mut self.living_labeled_assets, + self.watching_for_changes, TypeId::of::(), None, None, @@ -107,6 +112,8 @@ impl AssetInfos { Self::create_handle_internal( &mut self.infos, &self.handle_providers, + &mut self.living_labeled_assets, + self.watching_for_changes, type_id, None, None, @@ -116,9 +123,12 @@ impl AssetInfos { ) } + #[allow(clippy::too_many_arguments)] fn create_handle_internal( infos: &mut HashMap, handle_providers: &HashMap, + living_labeled_assets: &mut HashMap, HashSet>, + watching_for_changes: bool, type_id: TypeId, path: Option>, meta_transform: Option, @@ -128,6 +138,16 @@ impl AssetInfos { .get(&type_id) .ok_or(MissingHandleProviderError(type_id))?; + if watching_for_changes { + if let Some(path) = &path { + let mut without_label = path.to_owned(); + if let Some(label) = without_label.take_label() { + let labels = living_labeled_assets.entry(without_label).or_default(); + labels.insert(label.to_string()); + } + } + } + let handle = provider.reserve_handle_internal(true, path.clone(), meta_transform); let mut info = AssetInfo::new(Arc::downgrade(&handle), path); if loading { @@ -136,6 +156,7 @@ impl AssetInfos { info.rec_dep_load_state = RecursiveDependencyLoadState::Loading; } infos.insert(handle.id, info); + Ok(UntypedHandle::Strong(handle)) } @@ -226,6 +247,8 @@ impl AssetInfos { let handle = Self::create_handle_internal( &mut self.infos, &self.handle_providers, + &mut self.living_labeled_assets, + self.watching_for_changes, type_id, Some(path), meta_transform, @@ -256,7 +279,7 @@ impl AssetInfos { Some(UntypedHandle::Strong(strong_handle)) } - /// Returns `true` if this path has + /// Returns `true` if the asset this path points to is still alive pub(crate) fn is_path_alive<'a>(&self, path: impl Into>) -> bool { let path = path.into(); if let Some(id) = self.path_to_id.get(&path) { @@ -267,12 +290,26 @@ impl AssetInfos { false } + /// Returns `true` if the asset at this path should be reloaded + pub(crate) fn should_reload(&self, path: &AssetPath) -> bool { + if self.is_path_alive(path) { + return true; + } + + if let Some(living) = self.living_labeled_assets.get(path) { + !living.is_empty() + } else { + false + } + } + // Returns `true` if the asset should be removed from the collection pub(crate) fn process_handle_drop(&mut self, id: UntypedAssetId) -> bool { Self::process_handle_drop_internal( &mut self.infos, &mut self.path_to_id, &mut self.loader_dependants, + &mut self.living_labeled_assets, self.watching_for_changes, id, ) @@ -521,6 +558,7 @@ impl AssetInfos { infos: &mut HashMap, path_to_id: &mut HashMap, UntypedAssetId>, loader_dependants: &mut HashMap, HashSet>>, + living_labeled_assets: &mut HashMap, HashSet>, watching_for_changes: bool, id: UntypedAssetId, ) -> bool { @@ -540,6 +578,18 @@ impl AssetInfos { dependants.remove(&path); } } + if let Some(label) = path.label() { + let mut without_label = path.to_owned(); + without_label.remove_label(); + if let Entry::Occupied(mut entry) = + living_labeled_assets.entry(without_label) + { + entry.get_mut().remove(label); + if entry.get().is_empty() { + entry.remove(); + } + }; + } } path_to_id.remove(&path); } @@ -566,6 +616,7 @@ impl AssetInfos { &mut self.infos, &mut self.path_to_id, &mut self.loader_dependants, + &mut self.living_labeled_assets, 
self.watching_for_changes, id.untyped(provider.type_id), ); diff --git a/crates/bevy_asset/src/server/mod.rs b/crates/bevy_asset/src/server/mod.rs index 121633cfade16..160a3e4d3228c 100644 --- a/crates/bevy_asset/src/server/mod.rs +++ b/crates/bevy_asset/src/server/mod.rs @@ -2,7 +2,10 @@ mod info; use crate::{ folder::LoadedFolder, - io::{AssetReader, AssetReaderError, AssetSourceEvent, AssetWatcher, Reader}, + io::{ + AssetReader, AssetReaderError, AssetSource, AssetSourceEvent, AssetSourceId, AssetSources, + MissingAssetSourceError, MissingProcessedAssetReaderError, Reader, + }, loader::{AssetLoader, ErasedAssetLoader, LoadContext, LoadedAsset}, meta::{ loader_settings_meta_transform, AssetActionMinimal, AssetMetaDyn, AssetMetaMinimal, @@ -48,52 +51,53 @@ pub(crate) struct AssetServerData { pub(crate) loaders: Arc>, asset_event_sender: Sender, asset_event_receiver: Receiver, - source_event_receiver: Receiver, - reader: Box, - _watcher: Option>, + sources: AssetSources, + mode: AssetServerMode, +} + +/// The "asset mode" the server is currently in. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum AssetServerMode { + /// This server loads unprocessed assets. + Unprocessed, + /// This server loads processed assets. + Processed, } impl AssetServer { /// Create a new instance of [`AssetServer`]. If `watch_for_changes` is true, the [`AssetReader`] storage will watch for changes to /// asset sources and hot-reload them. - pub fn new(reader: Box, watch_for_changes: bool) -> Self { - Self::new_with_loaders(reader, Default::default(), watch_for_changes) + pub fn new(sources: AssetSources, mode: AssetServerMode, watching_for_changes: bool) -> Self { + Self::new_with_loaders(sources, Default::default(), mode, watching_for_changes) } pub(crate) fn new_with_loaders( - reader: Box, + sources: AssetSources, loaders: Arc>, - watch_for_changes: bool, + mode: AssetServerMode, + watching_for_changes: bool, ) -> Self { let (asset_event_sender, asset_event_receiver) = crossbeam_channel::unbounded(); - let (source_event_sender, source_event_receiver) = crossbeam_channel::unbounded(); let mut infos = AssetInfos::default(); - let watcher = if watch_for_changes { - infos.watching_for_changes = true; - let watcher = reader.watch_for_changes(source_event_sender); - if watcher.is_none() { - error!("{}", CANNOT_WATCH_ERROR_MESSAGE); - } - watcher - } else { - None - }; + infos.watching_for_changes = watching_for_changes; Self { data: Arc::new(AssetServerData { - reader, - _watcher: watcher, + sources, + mode, asset_event_sender, asset_event_receiver, - source_event_receiver, loaders, infos: RwLock::new(infos), }), } } - /// Returns the primary [`AssetReader`]. - pub fn reader(&self) -> &dyn AssetReader { - &*self.data.reader + /// Retrieves the [`AssetReader`] for the given `source`. + pub fn get_source<'a>( + &'a self, + source: impl Into>, + ) -> Result<&'a AssetSource, MissingAssetSourceError> { + self.data.sources.get(source.into()) } /// Registers a new [`AssetLoader`]. [`AssetLoader`]s must be registered before they can be used. 
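With named sources and the new `AssetServerMode` in place, user code can ask the server which mode it is running in and look up a specific `AssetSource`. A hedged sketch of a system using the accessors added above (import paths assumed from the crate layout in this diff):

```rust
use bevy_asset::{io::AssetSourceId, AssetServer, AssetServerMode};
use bevy_ecs::prelude::Res;
use bevy_log::info;

// Logs which mode the server runs in and whether the default source is registered.
fn inspect_asset_server(asset_server: Res<AssetServer>) {
    match asset_server.mode() {
        AssetServerMode::Unprocessed => info!("serving unprocessed assets"),
        AssetServerMode::Processed => info!("serving processed assets"),
    }
    if asset_server.get_source(AssetSourceId::Default).is_ok() {
        info!("default asset source is available");
    }
}
```

The server itself branches on `mode()` the same way further down, choosing between `source.reader()` and `source.processed_reader()`.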
@@ -253,7 +257,7 @@ impl AssetServer { path: impl Into>, meta_transform: Option, ) -> Handle { - let mut path = path.into().into_owned(); + let path = path.into().into_owned(); let (handle, should_load) = self.data.infos.write().get_or_create_path_handle::( path.clone(), HandleLoadingMode::Request, @@ -261,13 +265,10 @@ impl AssetServer { ); if should_load { - let mut owned_handle = Some(handle.clone().untyped()); + let owned_handle = Some(handle.clone().untyped()); let server = self.clone(); IoTaskPool::get() .spawn(async move { - if path.take_label().is_some() { - owned_handle = None; - } if let Err(err) = server.load_internal(owned_handle, path, false, None).await { error!("{}", err); } @@ -287,6 +288,10 @@ impl AssetServer { self.load_internal(None, path, false, None).await } + /// Performs an async asset load. + /// + /// `input_handle` must only be [`Some`] if `should_load` was true when retrieving `input_handle`. This is an optimization to + /// avoid looking up `should_load` twice, but it means you _must_ be sure a load is necessary when calling this function with [`Some`]. async fn load_internal<'a>( &self, input_handle: Option, @@ -294,7 +299,7 @@ impl AssetServer { force: bool, meta_transform: Option, ) -> Result { - let mut path = path.into_owned(); + let path = path.into_owned(); let path_clone = path.clone(); let (mut meta, loader, mut reader) = self .get_meta_loader_and_reader(&path_clone) @@ -308,18 +313,8 @@ impl AssetServer { e })?; - let has_label = path.label().is_some(); - let (handle, should_load) = match input_handle { Some(handle) => { - if !has_label && handle.type_id() != loader.asset_type_id() { - return Err(AssetLoadError::RequestedHandleTypeMismatch { - path: path.into_owned(), - requested: handle.type_id(), - actual_asset_name: loader.asset_type_name(), - loader_name: loader.type_name(), - }); - } // if a handle was passed in, the "should load" check was already done (handle, true) } @@ -335,37 +330,51 @@ impl AssetServer { } }; + if path.label().is_none() && handle.type_id() != loader.asset_type_id() { + return Err(AssetLoadError::RequestedHandleTypeMismatch { + path: path.into_owned(), + requested: handle.type_id(), + actual_asset_name: loader.asset_type_name(), + loader_name: loader.type_name(), + }); + } + if !should_load && !force { return Ok(handle); } - let base_asset_id = if has_label { - path.remove_label(); - // If the path has a label, the current id does not match the asset root type. - // We need to get the actual asset id + + let (base_handle, base_path) = if path.label().is_some() { let mut infos = self.data.infos.write(); - let (actual_handle, _) = infos.get_or_create_path_handle_untyped( - path.clone(), + let base_path = path.without_label().into_owned(); + let (base_handle, _) = infos.get_or_create_path_handle_untyped( + base_path.clone(), loader.asset_type_id(), loader.asset_type_name(), - // ignore current load state ... 
we kicked off this sub asset load because it needed to be loaded but - // does not currently exist HandleLoadingMode::Force, None, ); - actual_handle.id() + (base_handle, base_path) } else { - handle.id() + (handle.clone(), path.clone()) }; - if let Some(meta_transform) = handle.meta_transform() { + if let Some(meta_transform) = base_handle.meta_transform() { (*meta_transform)(&mut *meta); } match self - .load_with_meta_loader_and_reader(&path, meta, &*loader, &mut *reader, true, false) + .load_with_meta_loader_and_reader(&base_path, meta, &*loader, &mut *reader, true, false) .await { Ok(mut loaded_asset) => { + if let Some(label) = path.label_cow() { + if !loaded_asset.labeled_assets.contains_key(&label) { + return Err(AssetLoadError::MissingLabel { + base_path, + label: label.to_string(), + }); + } + } for (_, labeled_asset) in loaded_asset.labeled_assets.drain() { self.send_asset_event(InternalAssetEvent::Loaded { id: labeled_asset.handle.id(), @@ -373,13 +382,15 @@ impl AssetServer { }); } self.send_asset_event(InternalAssetEvent::Loaded { - id: base_asset_id, + id: base_handle.id(), loaded_asset, }); Ok(handle) } Err(err) => { - self.send_asset_event(InternalAssetEvent::Failed { id: base_asset_id }); + self.send_asset_event(InternalAssetEvent::Failed { + id: base_handle.id(), + }); Err(err) } } @@ -391,7 +402,7 @@ impl AssetServer { let path = path.into().into_owned(); IoTaskPool::get() .spawn(async move { - if server.data.infos.read().is_path_alive(&path) { + if server.data.infos.read().should_reload(&path) { info!("Reloading {path} because it has changed"); if let Err(err) = server.load_internal(None, path, true, None).await { error!("{}", err); @@ -450,28 +461,33 @@ impl AssetServer { /// contain handles to all assets in the folder. You can wait for all assets to load by checking the [`LoadedFolder`]'s /// [`RecursiveDependencyLoadState`]. #[must_use = "not using the returned strong handle may result in the unexpected release of the assets"] - pub fn load_folder(&self, path: impl AsRef) -> Handle { + pub fn load_folder<'a>(&self, path: impl Into>) -> Handle { let handle = { let mut infos = self.data.infos.write(); infos.create_loading_handle::() }; let id = handle.id().untyped(); + let path = path.into().into_owned(); fn load_folder<'a>( + source: AssetSourceId<'static>, path: &'a Path, + reader: &'a dyn AssetReader, server: &'a AssetServer, handles: &'a mut Vec, ) -> bevy_utils::BoxedFuture<'a, Result<(), AssetLoadError>> { Box::pin(async move { - let is_dir = server.reader().is_directory(path).await?; + let is_dir = reader.is_directory(path).await?; if is_dir { - let mut path_stream = server.reader().read_directory(path.as_ref()).await?; + let mut path_stream = reader.read_directory(path.as_ref()).await?; while let Some(child_path) = path_stream.next().await { - if server.reader().is_directory(&child_path).await? { - load_folder(&child_path, server, handles).await?; + if reader.is_directory(&child_path).await? 
{ + load_folder(source.clone(), &child_path, reader, server, handles) + .await?; } else { let path = child_path.to_str().expect("Path should be a valid string."); - match server.load_untyped_async(AssetPath::new(path)).await { + let asset_path = AssetPath::parse(path).with_source(source.clone()); + match server.load_untyped_async(asset_path).await { Ok(handle) => handles.push(handle), // skip assets that cannot be loaded Err( @@ -488,11 +504,32 @@ impl AssetServer { } let server = self.clone(); - let owned_path = path.as_ref().to_owned(); IoTaskPool::get() .spawn(async move { + let Ok(source) = server.get_source(path.source()) else { + error!( + "Failed to load {path}. AssetSource {:?} does not exist", + path.source() + ); + return; + }; + + let asset_reader = match server.data.mode { + AssetServerMode::Unprocessed { .. } => source.reader(), + AssetServerMode::Processed { .. } => match source.processed_reader() { + Ok(reader) => reader, + Err(_) => { + error!( + "Failed to load {path}. AssetSource {:?} does not have a processed AssetReader", + path.source() + ); + return; + } + }, + }; + let mut handles = Vec::new(); - match load_folder(&owned_path, &server, &mut handles).await { + match load_folder(source.id(), path.path(), asset_reader, &server, &mut handles).await { Ok(_) => server.send_asset_event(InternalAssetEvent::Loaded { id, loaded_asset: LoadedAsset::new_with_dependencies( @@ -586,6 +623,11 @@ impl AssetServer { Some(info.path.as_ref()?.clone()) } + /// Returns the [`AssetServerMode`] this server is currently in. + pub fn mode(&self) -> AssetServerMode { + self.data.mode + } + /// Pre-register a loader that will later be added. /// /// Assets loaded with matching extensions will be blocked until the @@ -641,34 +683,43 @@ impl AssetServer { ), AssetLoadError, > { + let source = self.get_source(asset_path.source())?; // NOTE: We grab the asset byte reader first to ensure this is transactional for AssetReaders like ProcessorGatedReader // The asset byte reader will "lock" the processed asset, preventing writes for the duration of the lock. // Then the meta reader, if meta exists, will correspond to the meta for the current "version" of the asset. // See ProcessedAssetInfo::file_transaction_lock for more context - let reader = self.data.reader.read(asset_path.path()).await?; - match self.data.reader.read_meta_bytes(asset_path.path()).await { + let asset_reader = match self.data.mode { + AssetServerMode::Unprocessed { .. } => source.reader(), + AssetServerMode::Processed { .. } => source.processed_reader()?, + }; + let reader = asset_reader.read(asset_path.path()).await?; + match asset_reader.read_meta_bytes(asset_path.path()).await { Ok(meta_bytes) => { // TODO: this isn't fully minimal yet. we only need the loader let minimal: AssetMetaMinimal = ron::de::from_bytes(&meta_bytes).map_err(|e| { - AssetLoadError::DeserializeMeta(DeserializeMetaError::DeserializeMinimal(e)) + AssetLoadError::DeserializeMeta { + path: asset_path.clone_owned(), + error: Box::new(DeserializeMetaError::DeserializeMinimal(e)), + } })?; let loader_name = match minimal.asset { AssetActionMinimal::Load { loader } => loader, AssetActionMinimal::Process { .. 
} => { return Err(AssetLoadError::CannotLoadProcessedAsset { - path: asset_path.clone().into_owned(), + path: asset_path.clone_owned(), }) } AssetActionMinimal::Ignore => { return Err(AssetLoadError::CannotLoadIgnoredAsset { - path: asset_path.clone().into_owned(), + path: asset_path.clone_owned(), }) } }; let loader = self.get_asset_loader_with_type_name(&loader_name).await?; - let meta = loader.deserialize_meta(&meta_bytes).map_err(|_| { - AssetLoadError::CannotLoadDependency { - path: asset_path.clone().into_owned(), + let meta = loader.deserialize_meta(&meta_bytes).map_err(|e| { + AssetLoadError::DeserializeMeta { + path: asset_path.clone_owned(), + error: Box::new(e), } })?; @@ -693,13 +744,16 @@ impl AssetServer { populate_hashes: bool, ) -> Result { // TODO: experiment with this - let asset_path = asset_path.clone().into_owned(); + let asset_path = asset_path.clone_owned(); let load_context = LoadContext::new(self, asset_path.clone(), load_dependencies, populate_hashes); - loader - .load(reader, meta, load_context) - .await - .map_err(|_| AssetLoadError::CannotLoadDependency { path: asset_path }) + loader.load(reader, meta, load_context).await.map_err(|e| { + AssetLoadError::AssetLoaderError { + path: asset_path.clone_owned(), + loader_name: loader.type_name(), + error: e, + } + }) } } @@ -742,17 +796,36 @@ pub fn handle_internal_asset_events(world: &mut World) { } let mut paths_to_reload = HashSet::new(); - for event in server.data.source_event_receiver.try_iter() { + let mut handle_event = |source: AssetSourceId<'static>, event: AssetSourceEvent| { match event { // TODO: if the asset was processed and the processed file was changed, the first modified event // should be skipped? AssetSourceEvent::ModifiedAsset(path) | AssetSourceEvent::ModifiedMeta(path) => { - let path = AssetPath::from_path(path); + let path = AssetPath::from(path).with_source(source); queue_ancestors(&path, &infos, &mut paths_to_reload); paths_to_reload.insert(path); } _ => {} } + }; + + for source in server.data.sources.iter() { + match server.data.mode { + AssetServerMode::Unprocessed { .. } => { + if let Some(receiver) = source.event_receiver() { + for event in receiver.try_iter() { + handle_event(source.id(), event); + } + } + } + AssetServerMode::Processed { .. } => { + if let Some(receiver) = source.processed_event_receiver() { + for event in receiver.try_iter() { + handle_event(source.id(), event); + } + } + } + } } for path in paths_to_reload { @@ -848,16 +921,32 @@ pub enum AssetLoadError { MissingAssetLoaderForTypeName(#[from] MissingAssetLoaderForTypeNameError), #[error(transparent)] AssetReaderError(#[from] AssetReaderError), + #[error(transparent)] + MissingAssetSourceError(#[from] MissingAssetSourceError), + #[error(transparent)] + MissingProcessedAssetReaderError(#[from] MissingProcessedAssetReaderError), #[error("Encountered an error while reading asset metadata bytes")] AssetMetaReadError, - #[error(transparent)] - DeserializeMeta(DeserializeMetaError), + #[error("Failed to deserialize meta for asset {path}: {error}")] + DeserializeMeta { + path: AssetPath<'static>, + error: Box, + }, #[error("Asset '{path}' is configured to be processed. It cannot be loaded directly.")] CannotLoadProcessedAsset { path: AssetPath<'static> }, #[error("Asset '{path}' is configured to be ignored. It cannot be loaded.")] CannotLoadIgnoredAsset { path: AssetPath<'static> }, - #[error("Asset '{path}' is a dependency. 
It cannot be loaded directly.")] - CannotLoadDependency { path: AssetPath<'static> }, + #[error("Failed to load asset '{path}' with asset loader '{loader_name}': {error}")] + AssetLoaderError { + path: AssetPath<'static>, + loader_name: &'static str, + error: Box, + }, + #[error("The file at '{base_path}' does not contain the labeled asset '{label}'.")] + MissingLabel { + base_path: AssetPath<'static>, + label: String, + }, } /// An error that occurs when an [`AssetLoader`] is not registered for a given extension. @@ -882,7 +971,7 @@ fn format_missing_asset_ext(exts: &[String]) -> String { exts.join(", ") ) } else { - String::new() + " for file with no extension".to_string() } } @@ -893,8 +982,3 @@ impl std::fmt::Debug for AssetServer { .finish() } } - -pub(crate) static CANNOT_WATCH_ERROR_MESSAGE: &str = - "Cannot watch for changes because the current `AssetReader` does not support it. If you are using \ - the FileAssetReader (the default on desktop platforms), enabling the filesystem_watcher feature will \ - add this functionality."; diff --git a/crates/bevy_core/src/lib.rs b/crates/bevy_core/src/lib.rs index e5682d3a92262..c222af6a9d9fe 100644 --- a/crates/bevy_core/src/lib.rs +++ b/crates/bevy_core/src/lib.rs @@ -154,7 +154,10 @@ impl Plugin for FrameCountPlugin { } } -fn update_frame_count(mut frame_count: ResMut) { +/// A system used to increment [`FrameCount`] with wrapping addition. +/// +/// See [`FrameCount`] for more details. +pub fn update_frame_count(mut frame_count: ResMut) { frame_count.0 = frame_count.0.wrapping_add(1); } diff --git a/crates/bevy_core/src/task_pool_options.rs b/crates/bevy_core/src/task_pool_options.rs index 3eaedb8c8f972..a6eb39df6c723 100644 --- a/crates/bevy_core/src/task_pool_options.rs +++ b/crates/bevy_core/src/task_pool_options.rs @@ -107,7 +107,7 @@ impl TaskPoolOptions { trace!("IO Threads: {}", io_threads); remaining_threads = remaining_threads.saturating_sub(io_threads); - IoTaskPool::init(|| { + IoTaskPool::get_or_init(|| { TaskPoolBuilder::default() .num_threads(io_threads) .thread_name("IO Task Pool".to_string()) @@ -124,7 +124,7 @@ impl TaskPoolOptions { trace!("Async Compute Threads: {}", async_compute_threads); remaining_threads = remaining_threads.saturating_sub(async_compute_threads); - AsyncComputeTaskPool::init(|| { + AsyncComputeTaskPool::get_or_init(|| { TaskPoolBuilder::default() .num_threads(async_compute_threads) .thread_name("Async Compute Task Pool".to_string()) @@ -141,7 +141,7 @@ impl TaskPoolOptions { trace!("Compute Threads: {}", compute_threads); - ComputeTaskPool::init(|| { + ComputeTaskPool::get_or_init(|| { TaskPoolBuilder::default() .num_threads(compute_threads) .thread_name("Compute Task Pool".to_string()) diff --git a/crates/bevy_core_pipeline/src/blit/blit.wgsl b/crates/bevy_core_pipeline/src/blit/blit.wgsl index 5ee6c1a6f92cf..82521bf312154 100644 --- a/crates/bevy_core_pipeline/src/blit/blit.wgsl +++ b/crates/bevy_core_pipeline/src/blit/blit.wgsl @@ -1,4 +1,4 @@ -#import bevy_core_pipeline::fullscreen_vertex_shader FullscreenVertexOutput +#import bevy_core_pipeline::fullscreen_vertex_shader::FullscreenVertexOutput @group(0) @binding(0) var in_texture: texture_2d; @group(0) @binding(1) var in_sampler: sampler; diff --git a/crates/bevy_core_pipeline/src/bloom/bloom.wgsl b/crates/bevy_core_pipeline/src/bloom/bloom.wgsl index 3666f105b1180..8234b72213690 100644 --- a/crates/bevy_core_pipeline/src/bloom/bloom.wgsl +++ b/crates/bevy_core_pipeline/src/bloom/bloom.wgsl @@ -6,8 +6,6 @@ // * [COD] - Next Generation Post 
Processing in Call of Duty - http://www.iryoku.com/next-generation-post-processing-in-call-of-duty-advanced-warfare // * [PBB] - Physically Based Bloom - https://learnopengl.com/Guest-Articles/2022/Phys.-Based-Bloom -#import bevy_core_pipeline::fullscreen_vertex_shader - struct BloomUniforms { threshold_precomputations: vec4, viewport: vec4, diff --git a/crates/bevy_core_pipeline/src/bloom/mod.rs b/crates/bevy_core_pipeline/src/bloom/mod.rs index 9f8d0aec2946f..c1299794df570 100644 --- a/crates/bevy_core_pipeline/src/bloom/mod.rs +++ b/crates/bevy_core_pipeline/src/bloom/mod.rs @@ -170,30 +170,16 @@ impl ViewNode for BloomNode { // First downsample pass { - let downsampling_first_bind_group = - render_context - .render_device() - .create_bind_group(&BindGroupDescriptor { - label: Some("bloom_downsampling_first_bind_group"), - layout: &downsampling_pipeline_res.bind_group_layout, - entries: &[ - BindGroupEntry { - binding: 0, - // Read from main texture directly - resource: BindingResource::TextureView( - view_target.main_texture_view(), - ), - }, - BindGroupEntry { - binding: 1, - resource: BindingResource::Sampler(&bind_groups.sampler), - }, - BindGroupEntry { - binding: 2, - resource: uniforms.clone(), - }, - ], - }); + let downsampling_first_bind_group = render_context.render_device().create_bind_group( + "bloom_downsampling_first_bind_group", + &downsampling_pipeline_res.bind_group_layout, + &BindGroupEntries::sequential(( + // Read from main texture directly + view_target.main_texture_view(), + &bind_groups.sampler, + uniforms.clone(), + )), + ); let view = &bloom_texture.view(0); let mut downsampling_first_pass = @@ -416,46 +402,28 @@ fn prepare_bloom_bind_groups( let mut downsampling_bind_groups = Vec::with_capacity(bind_group_count); for mip in 1..bloom_texture.mip_count { - downsampling_bind_groups.push(render_device.create_bind_group(&BindGroupDescriptor { - label: Some("bloom_downsampling_bind_group"), - layout: &downsampling_pipeline.bind_group_layout, - entries: &[ - BindGroupEntry { - binding: 0, - resource: BindingResource::TextureView(&bloom_texture.view(mip - 1)), - }, - BindGroupEntry { - binding: 1, - resource: BindingResource::Sampler(sampler), - }, - BindGroupEntry { - binding: 2, - resource: uniforms.binding().unwrap(), - }, - ], - })); + downsampling_bind_groups.push(render_device.create_bind_group( + "bloom_downsampling_bind_group", + &downsampling_pipeline.bind_group_layout, + &BindGroupEntries::sequential(( + &bloom_texture.view(mip - 1), + sampler, + uniforms.binding().unwrap(), + )), + )); } let mut upsampling_bind_groups = Vec::with_capacity(bind_group_count); for mip in (0..bloom_texture.mip_count).rev() { - upsampling_bind_groups.push(render_device.create_bind_group(&BindGroupDescriptor { - label: Some("bloom_upsampling_bind_group"), - layout: &upsampling_pipeline.bind_group_layout, - entries: &[ - BindGroupEntry { - binding: 0, - resource: BindingResource::TextureView(&bloom_texture.view(mip)), - }, - BindGroupEntry { - binding: 1, - resource: BindingResource::Sampler(sampler), - }, - BindGroupEntry { - binding: 2, - resource: uniforms.binding().unwrap(), - }, - ], - })); + upsampling_bind_groups.push(render_device.create_bind_group( + "bloom_upsampling_bind_group", + &upsampling_pipeline.bind_group_layout, + &BindGroupEntries::sequential(( + &bloom_texture.view(mip), + sampler, + uniforms.binding().unwrap(), + )), + )); } commands.entity(entity).insert(BloomBindGroups { diff --git a/crates/bevy_core_pipeline/src/contrast_adaptive_sharpening/node.rs 
b/crates/bevy_core_pipeline/src/contrast_adaptive_sharpening/node.rs index 0dd3f086ee27c..5bb8b87ebc58b 100644 --- a/crates/bevy_core_pipeline/src/contrast_adaptive_sharpening/node.rs +++ b/crates/bevy_core_pipeline/src/contrast_adaptive_sharpening/node.rs @@ -7,8 +7,8 @@ use bevy_render::{ extract_component::{ComponentUniforms, DynamicUniformIndex}, render_graph::{Node, NodeRunError, RenderGraphContext}, render_resource::{ - BindGroup, BindGroupDescriptor, BindGroupEntry, BindingResource, BufferId, Operations, - PipelineCache, RenderPassColorAttachment, RenderPassDescriptor, TextureViewId, + BindGroup, BindGroupEntries, BufferId, Operations, PipelineCache, + RenderPassColorAttachment, RenderPassDescriptor, TextureViewId, }, renderer::RenderContext, view::{ExtractedView, ViewTarget}, @@ -77,29 +77,15 @@ impl Node for CASNode { bind_group } cached_bind_group => { - let bind_group = - render_context - .render_device() - .create_bind_group(&BindGroupDescriptor { - label: Some("cas_bind_group"), - layout: &sharpening_pipeline.texture_bind_group, - entries: &[ - BindGroupEntry { - binding: 0, - resource: BindingResource::TextureView(view_target.source), - }, - BindGroupEntry { - binding: 1, - resource: BindingResource::Sampler( - &sharpening_pipeline.sampler, - ), - }, - BindGroupEntry { - binding: 2, - resource: uniforms, - }, - ], - }); + let bind_group = render_context.render_device().create_bind_group( + "cas_bind_group", + &sharpening_pipeline.texture_bind_group, + &BindGroupEntries::sequential(( + view_target.source, + &sharpening_pipeline.sampler, + uniforms, + )), + ); let (_, _, bind_group) = cached_bind_group.insert((uniforms_id, source.id(), bind_group)); diff --git a/crates/bevy_core_pipeline/src/contrast_adaptive_sharpening/robust_contrast_adaptive_sharpening.wgsl b/crates/bevy_core_pipeline/src/contrast_adaptive_sharpening/robust_contrast_adaptive_sharpening.wgsl index 68e299cfa1f9a..252d97c9d6c3e 100644 --- a/crates/bevy_core_pipeline/src/contrast_adaptive_sharpening/robust_contrast_adaptive_sharpening.wgsl +++ b/crates/bevy_core_pipeline/src/contrast_adaptive_sharpening/robust_contrast_adaptive_sharpening.wgsl @@ -17,7 +17,7 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. 
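The render-side changes in this diff repeatedly collapse `BindGroupDescriptor`/`BindGroupEntry` boilerplate into the terser `RenderDevice::create_bind_group(label, layout, entries)` form, with `BindGroupEntries::sequential` assigning binding indices 0..N in tuple order. A hedged, self-contained sketch of the new call shape with placeholder resource names:

```rust
use bevy_render::{
    render_resource::{BindGroup, BindGroupEntries, BindGroupLayout, Sampler, TextureView},
    renderer::RenderDevice,
};

// Builds a bind group whose entries get binding indices 0 and 1 in tuple order,
// mirroring the bloom, CAS and FXAA call sites rewritten in this diff.
fn blit_like_bind_group(
    render_device: &RenderDevice,
    layout: &BindGroupLayout,
    source: &TextureView,
    sampler: &Sampler,
) -> BindGroup {
    render_device.create_bind_group(
        "example_blit_bind_group", // a label; `None` is accepted as well
        layout,
        &BindGroupEntries::sequential((source, sampler)),
    )
}
```

Where binding indices are sparse rather than strictly sequential, the same method also accepts a plain `&[BindGroupEntry]` slice, which is how the `MeshLayouts` bind groups later in this diff are built.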
-#import bevy_core_pipeline::fullscreen_vertex_shader FullscreenVertexOutput +#import bevy_core_pipeline::fullscreen_vertex_shader::FullscreenVertexOutput struct CASUniforms { sharpness: f32, diff --git a/crates/bevy_core_pipeline/src/deferred/copy_deferred_lighting_id.wgsl b/crates/bevy_core_pipeline/src/deferred/copy_deferred_lighting_id.wgsl index 2fc7b1d86748d..25acf47068222 100644 --- a/crates/bevy_core_pipeline/src/deferred/copy_deferred_lighting_id.wgsl +++ b/crates/bevy_core_pipeline/src/deferred/copy_deferred_lighting_id.wgsl @@ -1,5 +1,4 @@ -#import bevy_pbr::utils -#import bevy_core_pipeline::fullscreen_vertex_shader FullscreenVertexOutput +#import bevy_core_pipeline::fullscreen_vertex_shader::FullscreenVertexOutput @group(0) @binding(0) var material_id_texture: texture_2d; diff --git a/crates/bevy_core_pipeline/src/deferred/copy_lighting_id.rs b/crates/bevy_core_pipeline/src/deferred/copy_lighting_id.rs index cb68e472035cb..c60306286900a 100644 --- a/crates/bevy_core_pipeline/src/deferred/copy_lighting_id.rs +++ b/crates/bevy_core_pipeline/src/deferred/copy_lighting_id.rs @@ -18,10 +18,7 @@ use bevy_render::{ use bevy_ecs::query::QueryItem; use bevy_render::{ render_graph::{NodeRunError, RenderGraphContext, ViewNode}, - render_resource::{ - BindGroupDescriptor, BindGroupEntry, BindingResource, Operations, PipelineCache, - RenderPassDescriptor, - }, + render_resource::{Operations, PipelineCache, RenderPassDescriptor}, renderer::RenderContext, }; @@ -60,7 +57,7 @@ impl Plugin for CopyDeferredLightingIdPlugin { #[derive(Default)] pub struct CopyDeferredLightingIdNode; impl CopyDeferredLightingIdNode { - pub const NAME: &str = "copy_deferred_lighting_id"; + pub const NAME: &'static str = "copy_deferred_lighting_id"; } impl ViewNode for CopyDeferredLightingIdNode { @@ -94,18 +91,11 @@ impl ViewNode for CopyDeferredLightingIdNode { return Ok(()); }; - let bind_group = render_context - .render_device() - .create_bind_group(&BindGroupDescriptor { - label: Some("copy_deferred_lighting_id_bind_group"), - layout: ©_deferred_lighting_id_pipeline.layout, - entries: &[BindGroupEntry { - binding: 0, - resource: BindingResource::TextureView( - &deferred_lighting_pass_id_texture.default_view, - ), - }], - }); + let bind_group = render_context.render_device().create_bind_group( + "copy_deferred_lighting_id_bind_group", + ©_deferred_lighting_id_pipeline.layout, + &BindGroupEntries::single(&deferred_lighting_pass_id_texture.default_view), + ); let mut render_pass = render_context.begin_tracked_render_pass(RenderPassDescriptor { label: Some("copy_deferred_lighting_id_pass"), diff --git a/crates/bevy_core_pipeline/src/fxaa/fxaa.wgsl b/crates/bevy_core_pipeline/src/fxaa/fxaa.wgsl index d2302a2267b25..2ff080de5e8e5 100644 --- a/crates/bevy_core_pipeline/src/fxaa/fxaa.wgsl +++ b/crates/bevy_core_pipeline/src/fxaa/fxaa.wgsl @@ -6,7 +6,7 @@ // // Tweaks by mrDIMAS - https://github.com/FyroxEngine/Fyrox/blob/master/src/renderer/shaders/fxaa_fs.glsl -#import bevy_core_pipeline::fullscreen_vertex_shader FullscreenVertexOutput +#import bevy_core_pipeline::fullscreen_vertex_shader::FullscreenVertexOutput @group(0) @binding(0) var screenTexture: texture_2d; @group(0) @binding(1) var samp: sampler; diff --git a/crates/bevy_core_pipeline/src/fxaa/node.rs b/crates/bevy_core_pipeline/src/fxaa/node.rs index 2daaf0584d981..7eaf4dce268ad 100644 --- a/crates/bevy_core_pipeline/src/fxaa/node.rs +++ b/crates/bevy_core_pipeline/src/fxaa/node.rs @@ -6,9 +6,8 @@ use bevy_ecs::query::QueryItem; use bevy_render::{ 
render_graph::{NodeRunError, RenderGraphContext, ViewNode}, render_resource::{ - BindGroup, BindGroupDescriptor, BindGroupEntry, BindingResource, FilterMode, Operations, - PipelineCache, RenderPassColorAttachment, RenderPassDescriptor, SamplerDescriptor, - TextureViewId, + BindGroup, BindGroupEntries, FilterMode, Operations, PipelineCache, + RenderPassColorAttachment, RenderPassDescriptor, SamplerDescriptor, TextureViewId, }, renderer::RenderContext, view::ViewTarget, @@ -61,23 +60,11 @@ impl ViewNode for FxaaNode { ..default() }); - let bind_group = - render_context - .render_device() - .create_bind_group(&BindGroupDescriptor { - label: None, - layout: &fxaa_pipeline.texture_bind_group, - entries: &[ - BindGroupEntry { - binding: 0, - resource: BindingResource::TextureView(source), - }, - BindGroupEntry { - binding: 1, - resource: BindingResource::Sampler(&sampler), - }, - ], - }); + let bind_group = render_context.render_device().create_bind_group( + None, + &fxaa_pipeline.texture_bind_group, + &BindGroupEntries::sequential((source, &sampler)), + ); let (_, bind_group) = cached_bind_group.insert((source.id(), bind_group)); bind_group diff --git a/crates/bevy_core_pipeline/src/msaa_writeback.rs b/crates/bevy_core_pipeline/src/msaa_writeback.rs index 0646d4ce67ffb..d80bc0fce7bc9 100644 --- a/crates/bevy_core_pipeline/src/msaa_writeback.rs +++ b/crates/bevy_core_pipeline/src/msaa_writeback.rs @@ -8,6 +8,7 @@ use bevy_ecs::prelude::*; use bevy_render::{ camera::ExtractedCamera, render_graph::{Node, NodeRunError, RenderGraphApp, RenderGraphContext}, + render_resource::BindGroupEntries, renderer::RenderContext, view::{Msaa, ViewTarget}, Render, RenderSet, @@ -90,23 +91,11 @@ impl Node for MsaaWritebackNode { depth_stencil_attachment: None, }; - let bind_group = - render_context - .render_device() - .create_bind_group(&BindGroupDescriptor { - label: None, - layout: &blit_pipeline.texture_bind_group, - entries: &[ - BindGroupEntry { - binding: 0, - resource: BindingResource::TextureView(post_process.source), - }, - BindGroupEntry { - binding: 1, - resource: BindingResource::Sampler(&blit_pipeline.sampler), - }, - ], - }); + let bind_group = render_context.render_device().create_bind_group( + None, + &blit_pipeline.texture_bind_group, + &BindGroupEntries::sequential((post_process.source, &blit_pipeline.sampler)), + ); let mut render_pass = render_context .command_encoder() diff --git a/crates/bevy_core_pipeline/src/skybox/mod.rs b/crates/bevy_core_pipeline/src/skybox/mod.rs index efc133651930e..11caa03afd8aa 100644 --- a/crates/bevy_core_pipeline/src/skybox/mod.rs +++ b/crates/bevy_core_pipeline/src/skybox/mod.rs @@ -10,13 +10,13 @@ use bevy_render::{ extract_component::{ExtractComponent, ExtractComponentPlugin}, render_asset::RenderAssets, render_resource::{ - BindGroup, BindGroupDescriptor, BindGroupEntry, BindGroupLayout, BindGroupLayoutDescriptor, - BindGroupLayoutEntry, BindingResource, BindingType, BufferBindingType, - CachedRenderPipelineId, ColorTargetState, ColorWrites, CompareFunction, DepthBiasState, - DepthStencilState, FragmentState, MultisampleState, PipelineCache, PrimitiveState, - RenderPipelineDescriptor, SamplerBindingType, Shader, ShaderStages, ShaderType, - SpecializedRenderPipeline, SpecializedRenderPipelines, StencilFaceState, StencilState, - TextureFormat, TextureSampleType, TextureViewDimension, VertexState, + BindGroup, BindGroupEntries, BindGroupLayout, BindGroupLayoutDescriptor, + BindGroupLayoutEntry, BindingType, BufferBindingType, CachedRenderPipelineId, + 
ColorTargetState, ColorWrites, CompareFunction, DepthBiasState, DepthStencilState, + FragmentState, MultisampleState, PipelineCache, PrimitiveState, RenderPipelineDescriptor, + SamplerBindingType, Shader, ShaderStages, ShaderType, SpecializedRenderPipeline, + SpecializedRenderPipelines, StencilFaceState, StencilState, TextureFormat, + TextureSampleType, TextureViewDimension, VertexState, }, renderer::RenderDevice, texture::{BevyDefault, Image}, @@ -224,24 +224,15 @@ fn prepare_skybox_bind_groups( if let (Some(skybox), Some(view_uniforms)) = (images.get(&skybox.0), view_uniforms.uniforms.binding()) { - let bind_group = render_device.create_bind_group(&BindGroupDescriptor { - label: Some("skybox_bind_group"), - layout: &pipeline.bind_group_layout, - entries: &[ - BindGroupEntry { - binding: 0, - resource: BindingResource::TextureView(&skybox.texture_view), - }, - BindGroupEntry { - binding: 1, - resource: BindingResource::Sampler(&skybox.sampler), - }, - BindGroupEntry { - binding: 2, - resource: view_uniforms, - }, - ], - }); + let bind_group = render_device.create_bind_group( + "skybox_bind_group", + &pipeline.bind_group_layout, + &BindGroupEntries::sequential(( + &skybox.texture_view, + &skybox.sampler, + view_uniforms, + )), + ); commands.entity(entity).insert(SkyboxBindGroup(bind_group)); } diff --git a/crates/bevy_core_pipeline/src/skybox/skybox.wgsl b/crates/bevy_core_pipeline/src/skybox/skybox.wgsl index cf22c2b2970f2..7da40da7937d4 100644 --- a/crates/bevy_core_pipeline/src/skybox/skybox.wgsl +++ b/crates/bevy_core_pipeline/src/skybox/skybox.wgsl @@ -1,5 +1,5 @@ -#import bevy_render::view View -#import bevy_pbr::utils coords_to_viewport_uv +#import bevy_render::view::View +#import bevy_pbr::utils::coords_to_viewport_uv @group(0) @binding(0) var skybox: texture_cube; @group(0) @binding(1) var skybox_sampler: sampler; diff --git a/crates/bevy_core_pipeline/src/taa/mod.rs b/crates/bevy_core_pipeline/src/taa/mod.rs index e61dccc5d4bf5..28926a1b8ed0d 100644 --- a/crates/bevy_core_pipeline/src/taa/mod.rs +++ b/crates/bevy_core_pipeline/src/taa/mod.rs @@ -21,13 +21,13 @@ use bevy_render::{ prelude::{Camera, Projection}, render_graph::{NodeRunError, RenderGraphApp, RenderGraphContext, ViewNode, ViewNodeRunner}, render_resource::{ - BindGroupDescriptor, BindGroupEntry, BindGroupLayout, BindGroupLayoutDescriptor, - BindGroupLayoutEntry, BindingResource, BindingType, CachedRenderPipelineId, - ColorTargetState, ColorWrites, Extent3d, FilterMode, FragmentState, MultisampleState, - Operations, PipelineCache, PrimitiveState, RenderPassColorAttachment, RenderPassDescriptor, - RenderPipelineDescriptor, Sampler, SamplerBindingType, SamplerDescriptor, Shader, - ShaderStages, SpecializedRenderPipeline, SpecializedRenderPipelines, TextureDescriptor, - TextureDimension, TextureFormat, TextureSampleType, TextureUsages, TextureViewDimension, + BindGroupEntries, BindGroupLayout, BindGroupLayoutDescriptor, BindGroupLayoutEntry, + BindingType, CachedRenderPipelineId, ColorTargetState, ColorWrites, Extent3d, FilterMode, + FragmentState, MultisampleState, Operations, PipelineCache, PrimitiveState, + RenderPassColorAttachment, RenderPassDescriptor, RenderPipelineDescriptor, Sampler, + SamplerBindingType, SamplerDescriptor, Shader, ShaderStages, SpecializedRenderPipeline, + SpecializedRenderPipelines, TextureDescriptor, TextureDimension, TextureFormat, + TextureSampleType, TextureUsages, TextureViewDimension, }, renderer::{RenderContext, RenderDevice}, texture::{BevyDefault, CachedTexture, TextureCache}, @@ 
-197,45 +197,18 @@ impl ViewNode for TAANode { }; let view_target = view_target.post_process_write(); - let taa_bind_group = - render_context - .render_device() - .create_bind_group(&BindGroupDescriptor { - label: Some("taa_bind_group"), - layout: &pipelines.taa_bind_group_layout, - entries: &[ - BindGroupEntry { - binding: 0, - resource: BindingResource::TextureView(view_target.source), - }, - BindGroupEntry { - binding: 1, - resource: BindingResource::TextureView( - &taa_history_textures.read.default_view, - ), - }, - BindGroupEntry { - binding: 2, - resource: BindingResource::TextureView( - &prepass_motion_vectors_texture.default_view, - ), - }, - BindGroupEntry { - binding: 3, - resource: BindingResource::TextureView( - &prepass_depth_texture.default_view, - ), - }, - BindGroupEntry { - binding: 4, - resource: BindingResource::Sampler(&pipelines.nearest_sampler), - }, - BindGroupEntry { - binding: 5, - resource: BindingResource::Sampler(&pipelines.linear_sampler), - }, - ], - }); + let taa_bind_group = render_context.render_device().create_bind_group( + "taa_bind_group", + &pipelines.taa_bind_group_layout, + &BindGroupEntries::sequential(( + view_target.source, + &taa_history_textures.read.default_view, + &prepass_motion_vectors_texture.default_view, + &prepass_depth_texture.default_view, + &pipelines.nearest_sampler, + &pipelines.linear_sampler, + )), + ); { let mut taa_pass = render_context.begin_tracked_render_pass(RenderPassDescriptor { diff --git a/crates/bevy_core_pipeline/src/taa/taa.wgsl b/crates/bevy_core_pipeline/src/taa/taa.wgsl index 53be6b02f0ecf..43414b63121c0 100644 --- a/crates/bevy_core_pipeline/src/taa/taa.wgsl +++ b/crates/bevy_core_pipeline/src/taa/taa.wgsl @@ -10,8 +10,6 @@ const DEFAULT_HISTORY_BLEND_RATE: f32 = 0.1; // Default blend rate to use when no confidence in history const MIN_HISTORY_BLEND_RATE: f32 = 0.015; // Minimum blend rate allowed, to ensure at least some of the current sample is used -#import bevy_core_pipeline::fullscreen_vertex_shader - @group(0) @binding(0) var view_target: texture_2d; @group(0) @binding(1) var history: texture_2d; @group(0) @binding(2) var motion_vectors: texture_2d; diff --git a/crates/bevy_core_pipeline/src/tonemapping/mod.rs b/crates/bevy_core_pipeline/src/tonemapping/mod.rs index 2af1d48b6eed0..6aada19c458e5 100644 --- a/crates/bevy_core_pipeline/src/tonemapping/mod.rs +++ b/crates/bevy_core_pipeline/src/tonemapping/mod.rs @@ -306,8 +306,7 @@ pub fn get_lut_bindings<'a>( images: &'a RenderAssets, tonemapping_luts: &'a TonemappingLuts, tonemapping: &Tonemapping, - bindings: [u32; 2], -) -> [BindGroupEntry<'a>; 2] { +) -> (&'a TextureView, &'a Sampler) { let image = match tonemapping { // AgX lut texture used when tonemapping doesn't need a texture since it's very small (32x32x32) Tonemapping::None @@ -320,16 +319,7 @@ pub fn get_lut_bindings<'a>( Tonemapping::BlenderFilmic => &tonemapping_luts.blender_filmic, }; let lut_image = images.get(image).unwrap(); - [ - BindGroupEntry { - binding: bindings[0], - resource: BindingResource::TextureView(&lut_image.texture_view), - }, - BindGroupEntry { - binding: bindings[1], - resource: BindingResource::Sampler(&lut_image.sampler), - }, - ] + (&lut_image.texture_view, &lut_image.sampler) } pub fn get_lut_bind_group_layout_entries(bindings: [u32; 2]) -> [BindGroupLayoutEntry; 2] { diff --git a/crates/bevy_core_pipeline/src/tonemapping/node.rs b/crates/bevy_core_pipeline/src/tonemapping/node.rs index e3da4aa03e417..1d1c95d970850 100644 --- 
a/crates/bevy_core_pipeline/src/tonemapping/node.rs +++ b/crates/bevy_core_pipeline/src/tonemapping/node.rs @@ -7,9 +7,8 @@ use bevy_render::{ render_asset::RenderAssets, render_graph::{NodeRunError, RenderGraphContext, ViewNode}, render_resource::{ - BindGroup, BindGroupDescriptor, BindGroupEntry, BindingResource, BufferId, LoadOp, - Operations, PipelineCache, RenderPassColorAttachment, RenderPassDescriptor, - SamplerDescriptor, TextureViewId, + BindGroup, BindGroupEntries, BufferId, LoadOp, Operations, PipelineCache, + RenderPassColorAttachment, RenderPassDescriptor, SamplerDescriptor, TextureViewId, }, renderer::RenderContext, texture::Image, @@ -88,36 +87,19 @@ impl ViewNode for TonemappingNode { let tonemapping_luts = world.resource::(); - let mut entries = vec![ - BindGroupEntry { - binding: 0, - resource: view_uniforms.binding().unwrap(), - }, - BindGroupEntry { - binding: 1, - resource: BindingResource::TextureView(source), - }, - BindGroupEntry { - binding: 2, - resource: BindingResource::Sampler(&sampler), - }, - ]; - - entries.extend(get_lut_bindings( - gpu_images, - tonemapping_luts, - tonemapping, - [3, 4], - )); - - let bind_group = - render_context - .render_device() - .create_bind_group(&BindGroupDescriptor { - label: None, - layout: &tonemapping_pipeline.texture_bind_group, - entries: &entries, - }); + let lut_bindings = get_lut_bindings(gpu_images, tonemapping_luts, tonemapping); + + let bind_group = render_context.render_device().create_bind_group( + None, + &tonemapping_pipeline.texture_bind_group, + &BindGroupEntries::sequential(( + view_uniforms, + source, + &sampler, + lut_bindings.0, + lut_bindings.1, + )), + ); let (_, _, bind_group) = cached_bind_group.insert((view_uniforms_id, source.id(), bind_group)); diff --git a/crates/bevy_core_pipeline/src/tonemapping/tonemapping.wgsl b/crates/bevy_core_pipeline/src/tonemapping/tonemapping.wgsl index 4c73a891c81eb..a4eee79c7402a 100644 --- a/crates/bevy_core_pipeline/src/tonemapping/tonemapping.wgsl +++ b/crates/bevy_core_pipeline/src/tonemapping/tonemapping.wgsl @@ -1,8 +1,10 @@ #define TONEMAPPING_PASS -#import bevy_core_pipeline::fullscreen_vertex_shader FullscreenVertexOutput -#import bevy_render::view View -#import bevy_core_pipeline::tonemapping tone_mapping, powsafe, screen_space_dither +#import bevy_render::view::View +#import bevy_core_pipeline::{ + fullscreen_vertex_shader::FullscreenVertexOutput, + tonemapping::{tone_mapping, powsafe, screen_space_dither}, +} @group(0) @binding(0) var view: View; @@ -11,8 +13,6 @@ @group(0) @binding(3) var dt_lut_texture: texture_3d; @group(0) @binding(4) var dt_lut_sampler: sampler; -#import bevy_core_pipeline::tonemapping - @fragment fn fragment(in: FullscreenVertexOutput) -> @location(0) vec4 { let hdr_color = textureSample(hdr_texture, hdr_sampler, in.uv); @@ -21,7 +21,7 @@ fn fragment(in: FullscreenVertexOutput) -> @location(0) vec4 { #ifdef DEBAND_DITHER output_rgb = powsafe(output_rgb.rgb, 1.0 / 2.2); - output_rgb = output_rgb + bevy_core_pipeline::tonemapping::screen_space_dither(in.position.xy); + output_rgb = output_rgb + screen_space_dither(in.position.xy); // This conversion back to linear space is required because our output texture format is // SRGB; the GPU will assume our output is linear and will apply an SRGB conversion. 
output_rgb = powsafe(output_rgb.rgb, 2.2); diff --git a/crates/bevy_core_pipeline/src/tonemapping/tonemapping_shared.wgsl b/crates/bevy_core_pipeline/src/tonemapping/tonemapping_shared.wgsl index b8ca8f0d68f81..92da49b8242b8 100644 --- a/crates/bevy_core_pipeline/src/tonemapping/tonemapping_shared.wgsl +++ b/crates/bevy_core_pipeline/src/tonemapping/tonemapping_shared.wgsl @@ -1,6 +1,6 @@ #define_import_path bevy_core_pipeline::tonemapping -#import bevy_render::view View, ColorGrading +#import bevy_render::view::ColorGrading // hack !! not sure what to do with this #ifdef TONEMAPPING_PASS diff --git a/crates/bevy_core_pipeline/src/upscaling/node.rs b/crates/bevy_core_pipeline/src/upscaling/node.rs index 76ff1d195c998..536b2b9437515 100644 --- a/crates/bevy_core_pipeline/src/upscaling/node.rs +++ b/crates/bevy_core_pipeline/src/upscaling/node.rs @@ -4,9 +4,8 @@ use bevy_render::{ camera::{CameraOutputMode, ExtractedCamera}, render_graph::{NodeRunError, RenderGraphContext, ViewNode}, render_resource::{ - BindGroup, BindGroupDescriptor, BindGroupEntry, BindingResource, LoadOp, Operations, - PipelineCache, RenderPassColorAttachment, RenderPassDescriptor, SamplerDescriptor, - TextureViewId, + BindGroup, BindGroupEntries, LoadOp, Operations, PipelineCache, RenderPassColorAttachment, + RenderPassDescriptor, SamplerDescriptor, TextureViewId, }, renderer::RenderContext, view::ViewTarget, @@ -57,23 +56,11 @@ impl ViewNode for UpscalingNode { .render_device() .create_sampler(&SamplerDescriptor::default()); - let bind_group = - render_context - .render_device() - .create_bind_group(&BindGroupDescriptor { - label: None, - layout: &blit_pipeline.texture_bind_group, - entries: &[ - BindGroupEntry { - binding: 0, - resource: BindingResource::TextureView(upscaled_texture), - }, - BindGroupEntry { - binding: 1, - resource: BindingResource::Sampler(&sampler), - }, - ], - }); + let bind_group = render_context.render_device().create_bind_group( + None, + &blit_pipeline.texture_bind_group, + &BindGroupEntries::sequential((upscaled_texture, &sampler)), + ); let (_, bind_group) = cached_bind_group.insert((upscaled_texture.id(), bind_group)); bind_group diff --git a/crates/bevy_diagnostic/src/frame_time_diagnostics_plugin.rs b/crates/bevy_diagnostic/src/frame_time_diagnostics_plugin.rs index a17a6a19d4c13..7d7360a3b7a04 100644 --- a/crates/bevy_diagnostic/src/frame_time_diagnostics_plugin.rs +++ b/crates/bevy_diagnostic/src/frame_time_diagnostics_plugin.rs @@ -2,7 +2,7 @@ use crate::{Diagnostic, DiagnosticId, Diagnostics, RegisterDiagnostic}; use bevy_app::prelude::*; use bevy_core::FrameCount; use bevy_ecs::prelude::*; -use bevy_time::Time; +use bevy_time::{Real, Time}; /// Adds "frame time" diagnostic to an App, specifically "frame time", "fps" and "frame count" #[derive(Default)] @@ -30,12 +30,12 @@ impl FrameTimeDiagnosticsPlugin { pub fn diagnostic_system( mut diagnostics: Diagnostics, - time: Res
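Just above, `FrameTimeDiagnosticsPlugin::diagnostic_system` switches from `Time` to `Time<Real>`, so frame-time and FPS numbers follow the wall clock and ignore pausing or scaling of the virtual clock. A hedged sketch of requesting the same clock from user code (assumes `Real` is exported through the prelude):

```rust
use bevy::prelude::*;

// Reads the real (wall-clock) time, which keeps advancing even when the
// virtual clock is paused or scaled.
fn print_real_delta(time: Res<Time<Real>>) {
    info!("real frame delta: {:.3} ms", time.delta_seconds_f64() * 1000.0);
}
```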

for SetMeshViewBindGroup { type Param = (); diff --git a/crates/bevy_pbr/src/render/mesh.wgsl b/crates/bevy_pbr/src/render/mesh.wgsl index a14c6b1d25a2e..029c05a6ed5f2 100644 --- a/crates/bevy_pbr/src/render/mesh.wgsl +++ b/crates/bevy_pbr/src/render/mesh.wgsl @@ -1,10 +1,11 @@ -#import bevy_pbr::mesh_functions as mesh_functions -#import bevy_pbr::skinning -#import bevy_pbr::morph -#import bevy_pbr::mesh_bindings mesh -#import bevy_pbr::forward_io Vertex, VertexOutput -#import bevy_render::instance_index get_instance_index -#import bevy_pbr::view_transformations position_world_to_clip +#import bevy_pbr::{ + mesh_functions, + skinning, + morph::morph, + forward_io::{Vertex, VertexOutput}, + view_transformations::position_world_to_clip, +} +#import bevy_render::instance_index::get_instance_index #ifdef MORPH_TARGETS fn morph_vertex(vertex_in: Vertex) -> Vertex { @@ -15,12 +16,12 @@ fn morph_vertex(vertex_in: Vertex) -> Vertex { if weight == 0.0 { continue; } - vertex.position += weight * bevy_pbr::morph::morph(vertex.index, bevy_pbr::morph::position_offset, i); + vertex.position += weight * morph(vertex.index, bevy_pbr::morph::position_offset, i); #ifdef VERTEX_NORMALS - vertex.normal += weight * bevy_pbr::morph::morph(vertex.index, bevy_pbr::morph::normal_offset, i); + vertex.normal += weight * morph(vertex.index, bevy_pbr::morph::normal_offset, i); #endif #ifdef VERTEX_TANGENTS - vertex.tangent += vec4(weight * bevy_pbr::morph::morph(vertex.index, bevy_pbr::morph::tangent_offset, i), 0.0); + vertex.tangent += vec4(weight * morph(vertex.index, bevy_pbr::morph::tangent_offset, i), 0.0); #endif } return vertex; @@ -38,7 +39,7 @@ fn vertex(vertex_no_morph: Vertex) -> VertexOutput { #endif #ifdef SKINNED - var model = bevy_pbr::skinning::skin_model(vertex.joint_indices, vertex.joint_weights); + var model = skinning::skin_model(vertex.joint_indices, vertex.joint_weights); #else // Use vertex_no_morph.instance_index instead of vertex.instance_index to work around a wgpu dx12 bug. // See https://github.com/gfx-rs/naga/issues/2416 . 
@@ -47,7 +48,7 @@ fn vertex(vertex_no_morph: Vertex) -> VertexOutput { #ifdef VERTEX_NORMALS #ifdef SKINNED - out.world_normal = bevy_pbr::skinning::skin_normals(model, vertex.normal); + out.world_normal = skinning::skin_normals(model, vertex.normal); #else out.world_normal = mesh_functions::mesh_normal_local_to_world( vertex.normal, diff --git a/crates/bevy_pbr/src/render/mesh_bindings.rs b/crates/bevy_pbr/src/render/mesh_bindings.rs index dcc01e1aa4c8b..bf45fd12b8c83 100644 --- a/crates/bevy_pbr/src/render/mesh_bindings.rs +++ b/crates/bevy_pbr/src/render/mesh_bindings.rs @@ -4,8 +4,7 @@ use bevy_math::Mat4; use bevy_render::{ mesh::morph::MAX_MORPH_WEIGHTS, render_resource::{ - BindGroup, BindGroupDescriptor, BindGroupLayout, BindGroupLayoutDescriptor, - BindingResource, Buffer, TextureView, + BindGroup, BindGroupLayout, BindGroupLayoutDescriptor, BindingResource, Buffer, TextureView, }, renderer::RenderDevice, }; @@ -179,11 +178,11 @@ impl MeshLayouts { // ---------- BindGroup methods ---------- pub fn model_only(&self, render_device: &RenderDevice, model: &BindingResource) -> BindGroup { - render_device.create_bind_group(&BindGroupDescriptor { - entries: &[entry::model(0, model.clone())], - layout: &self.model_only, - label: Some("model_only_mesh_bind_group"), - }) + render_device.create_bind_group( + "model_only_mesh_bind_group", + &self.model_only, + &[entry::model(0, model.clone())], + ) } pub fn skinned( &self, @@ -191,11 +190,11 @@ impl MeshLayouts { model: &BindingResource, skin: &Buffer, ) -> BindGroup { - render_device.create_bind_group(&BindGroupDescriptor { - entries: &[entry::model(0, model.clone()), entry::skinning(1, skin)], - layout: &self.skinned, - label: Some("skinned_mesh_bind_group"), - }) + render_device.create_bind_group( + "skinned_mesh_bind_group", + &self.skinned, + &[entry::model(0, model.clone()), entry::skinning(1, skin)], + ) } pub fn morphed( &self, @@ -204,15 +203,15 @@ impl MeshLayouts { weights: &Buffer, targets: &TextureView, ) -> BindGroup { - render_device.create_bind_group(&BindGroupDescriptor { - entries: &[ + render_device.create_bind_group( + "morphed_mesh_bind_group", + &self.morphed, + &[ entry::model(0, model.clone()), entry::weights(2, weights), entry::targets(3, targets), ], - layout: &self.morphed, - label: Some("morphed_mesh_bind_group"), - }) + ) } pub fn morphed_skinned( &self, @@ -222,15 +221,15 @@ impl MeshLayouts { weights: &Buffer, targets: &TextureView, ) -> BindGroup { - render_device.create_bind_group(&BindGroupDescriptor { - entries: &[ + render_device.create_bind_group( + "morphed_skinned_mesh_bind_group", + &self.morphed_skinned, + &[ entry::model(0, model.clone()), entry::skinning(1, skin), entry::weights(2, weights), entry::targets(3, targets), ], - layout: &self.morphed_skinned, - label: Some("morphed_skinned_mesh_bind_group"), - }) + ) } } diff --git a/crates/bevy_pbr/src/render/mesh_bindings.wgsl b/crates/bevy_pbr/src/render/mesh_bindings.wgsl index 1d8cf31221350..81cca44e6c591 100644 --- a/crates/bevy_pbr/src/render/mesh_bindings.wgsl +++ b/crates/bevy_pbr/src/render/mesh_bindings.wgsl @@ -1,6 +1,6 @@ #define_import_path bevy_pbr::mesh_bindings -#import bevy_pbr::mesh_types Mesh +#import bevy_pbr::mesh_types::Mesh #ifdef MESH_BINDGROUP_1 diff --git a/crates/bevy_pbr/src/render/mesh_functions.wgsl b/crates/bevy_pbr/src/render/mesh_functions.wgsl index 4f926b8ac2b37..4757148e018a9 100644 --- a/crates/bevy_pbr/src/render/mesh_functions.wgsl +++ b/crates/bevy_pbr/src/render/mesh_functions.wgsl @@ -1,11 +1,15 @@ 
#define_import_path bevy_pbr::mesh_functions -#import bevy_pbr::mesh_view_bindings view -#import bevy_pbr::mesh_bindings mesh -#import bevy_pbr::mesh_types MESH_FLAGS_SIGN_DETERMINANT_MODEL_3X3_BIT -#import bevy_render::instance_index get_instance_index -#import bevy_render::maths affine_to_square, mat2x4_f32_to_mat3x3_unpack -#import bevy_pbr::view_transformations position_world_to_clip +#import bevy_pbr::{ + mesh_view_bindings::view, + mesh_bindings::mesh, + mesh_types::MESH_FLAGS_SIGN_DETERMINANT_MODEL_3X3_BIT, + view_transformations::position_world_to_clip, +} +#import bevy_render::{ + instance_index::get_instance_index, + maths::{affine_to_square, mat2x4_f32_to_mat3x3_unpack}, +} fn get_model_matrix(instance_index: u32) -> mat4x4 { return affine_to_square(mesh[get_instance_index(instance_index)].model); diff --git a/crates/bevy_pbr/src/render/mesh_view_bindings.rs b/crates/bevy_pbr/src/render/mesh_view_bindings.rs new file mode 100644 index 0000000000000..ba66561c6d785 --- /dev/null +++ b/crates/bevy_pbr/src/render/mesh_view_bindings.rs @@ -0,0 +1,453 @@ +use std::array; + +use bevy_core_pipeline::{ + prepass::ViewPrepassTextures, + tonemapping::{ + get_lut_bind_group_layout_entries, get_lut_bindings, Tonemapping, TonemappingLuts, + }, +}; +use bevy_ecs::{ + component::Component, + entity::Entity, + system::{Commands, Query, Res}, +}; +use bevy_render::{ + globals::{GlobalsBuffer, GlobalsUniform}, + render_asset::RenderAssets, + render_resource::{ + BindGroup, BindGroupLayout, BindGroupLayoutDescriptor, BindGroupLayoutEntry, BindingType, + BufferBindingType, DynamicBindGroupEntries, SamplerBindingType, ShaderStages, ShaderType, + TextureFormat, TextureSampleType, TextureViewDimension, + }, + renderer::RenderDevice, + texture::{BevyDefault, FallbackImageCubemap, FallbackImageMsaa, Image}, + view::{Msaa, ViewUniform, ViewUniforms}, +}; + +use crate::{ + environment_map, prepass, EnvironmentMapLight, FogMeta, GlobalLightMeta, GpuFog, GpuLights, + GpuPointLights, LightMeta, MeshPipeline, MeshPipelineKey, ScreenSpaceAmbientOcclusionTextures, + ShadowSamplers, ViewClusterBindings, ViewShadowBindings, +}; + +#[derive(Clone)] +pub struct MeshPipelineViewLayout { + pub bind_group_layout: BindGroupLayout, + + #[cfg(debug_assertions)] + pub texture_count: usize, +} + +bitflags::bitflags! { + /// A key that uniquely identifies a [`MeshPipelineViewLayout`]. + /// + /// Used to generate all possible layouts for the mesh pipeline in [`generate_view_layouts`], + /// so special care must be taken to not add too many flags, as the number of possible layouts + /// will grow exponentially. 
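The doc comment above warns that every flag added to `MeshPipelineViewLayoutKey` doubles the number of pre-generated view layouts. A standalone sketch of that counting trick with a reduced, hypothetical three-flag key (the real key has five flags, so 32 combinations):

```rust
use bitflags::bitflags;

bitflags! {
    // Hypothetical reduced key; the flags here mirror a subset of the real ones.
    #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
    struct ExampleLayoutKey: u32 {
        const MULTISAMPLED          = 1 << 0;
        const DEPTH_PREPASS         = 1 << 1;
        const MOTION_VECTOR_PREPASS = 1 << 2;
    }
}

impl ExampleLayoutKey {
    // Same trick as `MeshPipelineViewLayoutKey::COUNT`: highest bit pattern + 1.
    const COUNT: usize = Self::all().bits() as usize + 1;
}

fn main() {
    // One pre-generated layout per possible flag combination, as in
    // `generate_view_layouts`.
    let keys: Vec<ExampleLayoutKey> = (0..ExampleLayoutKey::COUNT)
        .map(|i| ExampleLayoutKey::from_bits_truncate(i as u32))
        .collect();
    assert_eq!(keys.len(), 8); // 2^3 combinations
    println!("{keys:?}");
}
```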
+ #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] + #[repr(transparent)] + pub struct MeshPipelineViewLayoutKey: u32 { + const MULTISAMPLED = (1 << 0); + const DEPTH_PREPASS = (1 << 1); + const NORMAL_PREPASS = (1 << 2); + const MOTION_VECTOR_PREPASS = (1 << 3); + const DEFERRED_PREPASS = (1 << 4); + } +} + +impl MeshPipelineViewLayoutKey { + // The number of possible layouts + pub const COUNT: usize = Self::all().bits() as usize + 1; + + /// Builds a unique label for each layout based on the flags + pub fn label(&self) -> String { + use MeshPipelineViewLayoutKey as Key; + + format!( + "mesh_view_layout{}{}{}{}{}", + self.contains(Key::MULTISAMPLED) + .then_some("_multisampled") + .unwrap_or_default(), + self.contains(Key::DEPTH_PREPASS) + .then_some("_depth") + .unwrap_or_default(), + self.contains(Key::NORMAL_PREPASS) + .then_some("_normal") + .unwrap_or_default(), + self.contains(Key::MOTION_VECTOR_PREPASS) + .then_some("_motion") + .unwrap_or_default(), + self.contains(Key::DEFERRED_PREPASS) + .then_some("_deferred") + .unwrap_or_default(), + ) + } +} + +impl From for MeshPipelineViewLayoutKey { + fn from(value: MeshPipelineKey) -> Self { + let mut result = MeshPipelineViewLayoutKey::empty(); + + if value.msaa_samples() > 1 { + result |= MeshPipelineViewLayoutKey::MULTISAMPLED; + } + if value.contains(MeshPipelineKey::DEPTH_PREPASS) { + result |= MeshPipelineViewLayoutKey::DEPTH_PREPASS; + } + if value.contains(MeshPipelineKey::NORMAL_PREPASS) { + result |= MeshPipelineViewLayoutKey::NORMAL_PREPASS; + } + if value.contains(MeshPipelineKey::MOTION_VECTOR_PREPASS) { + result |= MeshPipelineViewLayoutKey::MOTION_VECTOR_PREPASS; + } + if value.contains(MeshPipelineKey::DEFERRED_PREPASS) { + result |= MeshPipelineViewLayoutKey::DEFERRED_PREPASS; + } + + result + } +} + +impl From for MeshPipelineViewLayoutKey { + fn from(value: Msaa) -> Self { + let mut result = MeshPipelineViewLayoutKey::empty(); + + if value.samples() > 1 { + result |= MeshPipelineViewLayoutKey::MULTISAMPLED; + } + + result + } +} + +impl From> for MeshPipelineViewLayoutKey { + fn from(value: Option<&ViewPrepassTextures>) -> Self { + let mut result = MeshPipelineViewLayoutKey::empty(); + + if let Some(prepass_textures) = value { + if prepass_textures.depth.is_some() { + result |= MeshPipelineViewLayoutKey::DEPTH_PREPASS; + } + if prepass_textures.normal.is_some() { + result |= MeshPipelineViewLayoutKey::NORMAL_PREPASS; + } + if prepass_textures.motion_vectors.is_some() { + result |= MeshPipelineViewLayoutKey::MOTION_VECTOR_PREPASS; + } + if prepass_textures.deferred.is_some() { + result |= MeshPipelineViewLayoutKey::DEFERRED_PREPASS; + } + } + + result + } +} + +/// Returns the appropriate bind group layout vec based on the parameters +fn layout_entries( + clustered_forward_buffer_binding_type: BufferBindingType, + layout_key: MeshPipelineViewLayoutKey, +) -> Vec { + let mut entries = vec![ + // View + BindGroupLayoutEntry { + binding: 0, + visibility: ShaderStages::VERTEX | ShaderStages::FRAGMENT, + ty: BindingType::Buffer { + ty: BufferBindingType::Uniform, + has_dynamic_offset: true, + min_binding_size: Some(ViewUniform::min_size()), + }, + count: None, + }, + // Lights + BindGroupLayoutEntry { + binding: 1, + visibility: ShaderStages::FRAGMENT, + ty: BindingType::Buffer { + ty: BufferBindingType::Uniform, + has_dynamic_offset: true, + min_binding_size: Some(GpuLights::min_size()), + }, + count: None, + }, + // Point Shadow Texture Cube Array + BindGroupLayoutEntry { + binding: 2, + visibility: 
ShaderStages::FRAGMENT, + ty: BindingType::Texture { + multisampled: false, + sample_type: TextureSampleType::Depth, + #[cfg(any(not(feature = "webgl"), not(target_arch = "wasm32")))] + view_dimension: TextureViewDimension::CubeArray, + #[cfg(all(feature = "webgl", target_arch = "wasm32"))] + view_dimension: TextureViewDimension::Cube, + }, + count: None, + }, + // Point Shadow Texture Array Sampler + BindGroupLayoutEntry { + binding: 3, + visibility: ShaderStages::FRAGMENT, + ty: BindingType::Sampler(SamplerBindingType::Comparison), + count: None, + }, + // Directional Shadow Texture Array + BindGroupLayoutEntry { + binding: 4, + visibility: ShaderStages::FRAGMENT, + ty: BindingType::Texture { + multisampled: false, + sample_type: TextureSampleType::Depth, + #[cfg(any(not(feature = "webgl"), not(target_arch = "wasm32")))] + view_dimension: TextureViewDimension::D2Array, + #[cfg(all(feature = "webgl", target_arch = "wasm32"))] + view_dimension: TextureViewDimension::D2, + }, + count: None, + }, + // Directional Shadow Texture Array Sampler + BindGroupLayoutEntry { + binding: 5, + visibility: ShaderStages::FRAGMENT, + ty: BindingType::Sampler(SamplerBindingType::Comparison), + count: None, + }, + // PointLights + BindGroupLayoutEntry { + binding: 6, + visibility: ShaderStages::FRAGMENT, + ty: BindingType::Buffer { + ty: clustered_forward_buffer_binding_type, + has_dynamic_offset: false, + min_binding_size: Some(GpuPointLights::min_size( + clustered_forward_buffer_binding_type, + )), + }, + count: None, + }, + // ClusteredLightIndexLists + BindGroupLayoutEntry { + binding: 7, + visibility: ShaderStages::FRAGMENT, + ty: BindingType::Buffer { + ty: clustered_forward_buffer_binding_type, + has_dynamic_offset: false, + min_binding_size: Some(ViewClusterBindings::min_size_cluster_light_index_lists( + clustered_forward_buffer_binding_type, + )), + }, + count: None, + }, + // ClusterOffsetsAndCounts + BindGroupLayoutEntry { + binding: 8, + visibility: ShaderStages::FRAGMENT, + ty: BindingType::Buffer { + ty: clustered_forward_buffer_binding_type, + has_dynamic_offset: false, + min_binding_size: Some(ViewClusterBindings::min_size_cluster_offsets_and_counts( + clustered_forward_buffer_binding_type, + )), + }, + count: None, + }, + // Globals + BindGroupLayoutEntry { + binding: 9, + visibility: ShaderStages::VERTEX_FRAGMENT, + ty: BindingType::Buffer { + ty: BufferBindingType::Uniform, + has_dynamic_offset: false, + min_binding_size: Some(GlobalsUniform::min_size()), + }, + count: None, + }, + // Fog + BindGroupLayoutEntry { + binding: 10, + visibility: ShaderStages::FRAGMENT, + ty: BindingType::Buffer { + ty: BufferBindingType::Uniform, + has_dynamic_offset: true, + min_binding_size: Some(GpuFog::min_size()), + }, + count: None, + }, + // Screen space ambient occlusion texture + BindGroupLayoutEntry { + binding: 11, + visibility: ShaderStages::FRAGMENT, + ty: BindingType::Texture { + multisampled: false, + sample_type: TextureSampleType::Float { filterable: false }, + view_dimension: TextureViewDimension::D2, + }, + count: None, + }, + ]; + + // EnvironmentMapLight + let environment_map_entries = environment_map::get_bind_group_layout_entries([12, 13, 14]); + entries.extend_from_slice(&environment_map_entries); + + // Tonemapping + let tonemapping_lut_entries = get_lut_bind_group_layout_entries([15, 16]); + entries.extend_from_slice(&tonemapping_lut_entries); + + if cfg!(any(not(feature = "webgl"), not(target_arch = "wasm32"))) + || (cfg!(all(feature = "webgl", target_arch = "wasm32")) + && 
!layout_key.contains(MeshPipelineViewLayoutKey::MULTISAMPLED)) + { + entries.extend_from_slice(&prepass::get_bind_group_layout_entries( + [17, 18, 19, 20], + layout_key, + )); + } + + entries +} + +/// Generates all possible view layouts for the mesh pipeline, based on all combinations of +/// [`MeshPipelineViewLayoutKey`] flags. +pub fn generate_view_layouts( + render_device: &RenderDevice, + clustered_forward_buffer_binding_type: BufferBindingType, +) -> [MeshPipelineViewLayout; MeshPipelineViewLayoutKey::COUNT] { + array::from_fn(|i| { + let key = MeshPipelineViewLayoutKey::from_bits_truncate(i as u32); + let entries = layout_entries(clustered_forward_buffer_binding_type, key); + + #[cfg(debug_assertions)] + let texture_count: usize = entries + .iter() + .filter(|entry| matches!(entry.ty, BindingType::Texture { .. })) + .count(); + + MeshPipelineViewLayout { + bind_group_layout: render_device.create_bind_group_layout(&BindGroupLayoutDescriptor { + label: Some(key.label().as_str()), + entries: &entries, + }), + #[cfg(debug_assertions)] + texture_count, + } + }) +} + +#[derive(Component)] +pub struct MeshViewBindGroup { + pub value: BindGroup, +} + +#[allow(clippy::too_many_arguments)] +pub fn prepare_mesh_view_bind_groups( + mut commands: Commands, + render_device: Res, + mesh_pipeline: Res, + shadow_samplers: Res, + light_meta: Res, + global_light_meta: Res, + fog_meta: Res, + view_uniforms: Res, + views: Query<( + Entity, + &ViewShadowBindings, + &ViewClusterBindings, + Option<&ScreenSpaceAmbientOcclusionTextures>, + Option<&ViewPrepassTextures>, + Option<&EnvironmentMapLight>, + &Tonemapping, + )>, + (images, mut fallback_images, fallback_cubemap): ( + Res>, + FallbackImageMsaa, + Res, + ), + msaa: Res, + globals_buffer: Res, + tonemapping_luts: Res, +) { + if let ( + Some(view_binding), + Some(light_binding), + Some(point_light_binding), + Some(globals), + Some(fog_binding), + ) = ( + view_uniforms.uniforms.binding(), + light_meta.view_gpu_lights.binding(), + global_light_meta.gpu_point_lights.binding(), + globals_buffer.buffer.binding(), + fog_meta.gpu_fogs.binding(), + ) { + for ( + entity, + shadow_bindings, + cluster_bindings, + ssao_textures, + prepass_textures, + environment_map, + tonemapping, + ) in &views + { + let fallback_ssao = fallback_images + .image_for_samplecount(1, TextureFormat::bevy_default()) + .texture_view + .clone(); + let ssao_view = ssao_textures + .map(|t| &t.screen_space_ambient_occlusion_texture.default_view) + .unwrap_or(&fallback_ssao); + + let layout = &mesh_pipeline.get_view_layout( + MeshPipelineViewLayoutKey::from(*msaa) + | MeshPipelineViewLayoutKey::from(prepass_textures), + ); + + let mut entries = DynamicBindGroupEntries::new_with_indices(( + (0, view_binding.clone()), + (1, light_binding.clone()), + (2, &shadow_bindings.point_light_depth_texture_view), + (3, &shadow_samplers.point_light_sampler), + (4, &shadow_bindings.directional_light_depth_texture_view), + (5, &shadow_samplers.directional_light_sampler), + (6, point_light_binding.clone()), + (7, cluster_bindings.light_index_lists_binding().unwrap()), + (8, cluster_bindings.offsets_and_counts_binding().unwrap()), + (9, globals.clone()), + (10, fog_binding.clone()), + (11, ssao_view), + )); + + let env_map_bindings = + environment_map::get_bindings(environment_map, &images, &fallback_cubemap); + entries = entries.extend_with_indices(( + (12, env_map_bindings.0), + (13, env_map_bindings.1), + (14, env_map_bindings.2), + )); + + let lut_bindings = get_lut_bindings(&images, &tonemapping_luts, 
tonemapping); + entries = entries.extend_with_indices(((15, lut_bindings.0), (16, lut_bindings.1))); + + // When using WebGL, we can't have a depth texture with multisampling + let prepass_bindings; + if cfg!(any(not(feature = "webgl"), not(target_arch = "wasm32"))) || msaa.samples() == 1 + { + prepass_bindings = prepass::get_bindings(prepass_textures); + for (binding, index) in prepass_bindings + .iter() + .map(Option::as_ref) + .zip([17, 18, 19, 20]) + .flat_map(|(b, i)| b.map(|b| (b, i))) + { + entries = entries.extend_with_indices(((index, binding),)); + } + } + + commands.entity(entity).insert(MeshViewBindGroup { + value: render_device.create_bind_group("mesh_view_bind_group", layout, &entries), + }); + } + } +} diff --git a/crates/bevy_pbr/src/render/mesh_view_bindings.wgsl b/crates/bevy_pbr/src/render/mesh_view_bindings.wgsl index f3b51ddc50abb..1e863f4207f5c 100644 --- a/crates/bevy_pbr/src/render/mesh_view_bindings.wgsl +++ b/crates/bevy_pbr/src/render/mesh_view_bindings.wgsl @@ -1,8 +1,10 @@ #define_import_path bevy_pbr::mesh_view_bindings #import bevy_pbr::mesh_view_types as types -#import bevy_render::view View -#import bevy_render::globals Globals +#import bevy_render::{ + view::View, + globals::Globals, +} @group(0) @binding(0) var view: View; @group(0) @binding(1) var lights: types::Lights; @@ -42,12 +44,31 @@ @group(0) @binding(16) var dt_lut_sampler: sampler; #ifdef MULTISAMPLED + +#ifdef DEPTH_PREPASS @group(0) @binding(17) var depth_prepass_texture: texture_depth_multisampled_2d; +#endif // DEPTH_PREPASS +#ifdef NORMAL_PREPASS @group(0) @binding(18) var normal_prepass_texture: texture_multisampled_2d; +#endif // NORMAL_PREPASS +#ifdef MOTION_VECTOR_PREPASS @group(0) @binding(19) var motion_vector_prepass_texture: texture_multisampled_2d; -#else +#endif // MOTION_VECTOR_PREPASS + +#else // MULTISAMPLED + +#ifdef DEPTH_PREPASS @group(0) @binding(17) var depth_prepass_texture: texture_depth_2d; +#endif // DEPTH_PREPASS +#ifdef NORMAL_PREPASS @group(0) @binding(18) var normal_prepass_texture: texture_2d; +#endif // NORMAL_PREPASS +#ifdef MOTION_VECTOR_PREPASS @group(0) @binding(19) var motion_vector_prepass_texture: texture_2d; +#endif // MOTION_VECTOR_PREPASS + +#endif // MULTISAMPLED + +#ifdef DEFERRED_PREPASS @group(0) @binding(20) var deferred_prepass_texture: texture_2d; -#endif +#endif // DEFERRED_PREPASS diff --git a/crates/bevy_pbr/src/render/mesh_view_types.wgsl b/crates/bevy_pbr/src/render/mesh_view_types.wgsl index b944aa2792e2f..f115d49d7898a 100644 --- a/crates/bevy_pbr/src/render/mesh_view_types.wgsl +++ b/crates/bevy_pbr/src/render/mesh_view_types.wgsl @@ -1,8 +1,5 @@ #define_import_path bevy_pbr::mesh_view_types -#import bevy_render::view -#import bevy_render::globals - struct PointLight { // For point lights: the lower-right 2x2 values of the projection matrix [2][2] [2][3] [3][2] [3][3] // For spot lights: the direction (x,z), spot_scale and spot_offset diff --git a/crates/bevy_pbr/src/render/mod.rs b/crates/bevy_pbr/src/render/mod.rs index b9d0d239c3874..7efffc05681f8 100644 --- a/crates/bevy_pbr/src/render/mod.rs +++ b/crates/bevy_pbr/src/render/mod.rs @@ -2,6 +2,7 @@ mod fog; mod light; pub(crate) mod mesh; mod mesh_bindings; +mod mesh_view_bindings; mod morph; mod skin; @@ -9,4 +10,5 @@ pub use fog::*; pub use light::*; pub use mesh::*; pub use mesh_bindings::MeshLayouts; +pub use mesh_view_bindings::*; pub use skin::{extract_skins, prepare_skins, SkinIndex, SkinUniform, MAX_JOINTS}; diff --git a/crates/bevy_pbr/src/render/morph.wgsl 
b/crates/bevy_pbr/src/render/morph.wgsl index 291b3efb5841a..7355f95f33e0c 100644 --- a/crates/bevy_pbr/src/render/morph.wgsl +++ b/crates/bevy_pbr/src/render/morph.wgsl @@ -1,15 +1,8 @@ -// If using this WGSL snippet as an #import, the following should be in scope: -// -// - the `morph_weights` uniform of type `MorphWeights` -// - the `morph_targets` 3d texture -// -// They are defined in `mesh_types.wgsl` and `mesh_bindings.wgsl`. - #define_import_path bevy_pbr::morph #ifdef MORPH_TARGETS -#import bevy_pbr::mesh_types MorphWeights +#import bevy_pbr::mesh_types::MorphWeights; #ifdef MESH_BINDGROUP_1 @@ -61,4 +54,4 @@ fn morph(vertex_index: u32, component_offset: u32, weight_index: u32) -> vec3) -> f32 { // We use `textureSampleLevel` over `textureSample` because the wgpu DX12 diff --git a/crates/bevy_pbr/src/render/pbr.wgsl b/crates/bevy_pbr/src/render/pbr.wgsl index dc84ee30dfa5f..def70dd89b3ed 100644 --- a/crates/bevy_pbr/src/render/pbr.wgsl +++ b/crates/bevy_pbr/src/render/pbr.wgsl @@ -1,226 +1,49 @@ -#define_import_path bevy_pbr::fragment - -#import bevy_pbr::pbr_functions as pbr_functions -#import bevy_pbr::pbr_bindings as pbr_bindings -#import bevy_pbr::pbr_types as pbr_types - -#import bevy_pbr::mesh_bindings mesh -#import bevy_pbr::mesh_view_bindings view, fog, screen_space_ambient_occlusion_texture -#import bevy_pbr::mesh_view_types FOG_MODE_OFF -#import bevy_core_pipeline::tonemapping screen_space_dither, powsafe, tone_mapping -#import bevy_pbr::parallax_mapping parallaxed_uv - -#import bevy_pbr::prepass_utils +#import bevy_pbr::{ + pbr_functions::alpha_discard, + pbr_fragment::pbr_input_from_standard_material, +} -#ifdef SCREEN_SPACE_AMBIENT_OCCLUSION -#import bevy_pbr::gtao_utils gtao_multibounce +#ifdef PREPASS_PIPELINE +#import bevy_pbr::{ + prepass_io::{VertexOutput, FragmentOutput}, + pbr_deferred_functions::deferred_output, +} +#else +#import bevy_pbr::{ + forward_io::{VertexOutput, FragmentOutput}, + pbr_functions::{apply_pbr_lighting, main_pass_post_lighting_processing}, + pbr_types::STANDARD_MATERIAL_FLAGS_UNLIT_BIT, +} #endif -#ifdef DEFERRED_PREPASS -#import bevy_pbr::pbr_deferred_functions deferred_gbuffer_from_pbr_input -#import bevy_pbr::pbr_prepass_functions calculate_motion_vector -#import bevy_pbr::prepass_io VertexOutput, FragmentOutput -#else // DEFERRED_PREPASS -#import bevy_pbr::forward_io VertexOutput, FragmentOutput -#endif // DEFERRED_PREPASS - -#ifdef MOTION_VECTOR_PREPASS -@group(0) @binding(2) -var previous_view_proj: mat4x4; -#endif // MOTION_VECTOR_PREPASS - @fragment fn fragment( in: VertexOutput, @builtin(front_facing) is_front: bool, ) -> FragmentOutput { - var out: FragmentOutput; - - // calculate unlit color - // --------------------- - var unlit_color: vec4 = pbr_bindings::material.base_color; - - let is_orthographic = view.projection[3].w == 1.0; - let V = pbr_functions::calculate_view(in.world_position, is_orthographic); -#ifdef VERTEX_UVS - var uv = in.uv; -#ifdef VERTEX_TANGENTS - if ((pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_DEPTH_MAP_BIT) != 0u) { - let N = in.world_normal; - let T = in.world_tangent.xyz; - let B = in.world_tangent.w * cross(N, T); - // Transform V from fragment to camera in world space to tangent space. 
- let Vt = vec3(dot(V, T), dot(V, B), dot(V, N)); - uv = parallaxed_uv( - pbr_bindings::material.parallax_depth_scale, - pbr_bindings::material.max_parallax_layer_count, - pbr_bindings::material.max_relief_mapping_search_steps, - uv, - // Flip the direction of Vt to go toward the surface to make the - // parallax mapping algorithm easier to understand and reason - // about. - -Vt, - ); - } -#endif -#endif - -#ifdef VERTEX_COLORS - unlit_color = unlit_color * in.color; -#endif -#ifdef VERTEX_UVS - if ((pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_BASE_COLOR_TEXTURE_BIT) != 0u) { - unlit_color = unlit_color * textureSampleBias(pbr_bindings::base_color_texture, pbr_bindings::base_color_sampler, uv, view.mip_bias); - } -#endif - - // gather pbr lighting data - // ------------------ - var pbr_input: pbr_types::PbrInput; - // NOTE: Unlit bit not set means == 0 is true, so the true case is if lit - if ((pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_UNLIT_BIT) == 0u) { - // Prepare a 'processed' StandardMaterial by sampling all textures to resolve - // the material members - - pbr_input.material.reflectance = pbr_bindings::material.reflectance; - pbr_input.material.flags = pbr_bindings::material.flags; - pbr_input.material.alpha_cutoff = pbr_bindings::material.alpha_cutoff; - pbr_input.frag_coord = in.position; - pbr_input.world_position = in.world_position; - pbr_input.is_orthographic = is_orthographic; - pbr_input.flags = mesh[in.instance_index].flags; - - // emmissive - // TODO use .a for exposure compensation in HDR - var emissive: vec4 = pbr_bindings::material.emissive; -#ifdef VERTEX_UVS - if ((pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_EMISSIVE_TEXTURE_BIT) != 0u) { - emissive = vec4(emissive.rgb * textureSampleBias(pbr_bindings::emissive_texture, pbr_bindings::emissive_sampler, uv, view.mip_bias).rgb, 1.0); - } -#endif - pbr_input.material.emissive = emissive; - - // metallic and perceptual roughness - var metallic: f32 = pbr_bindings::material.metallic; - var perceptual_roughness: f32 = pbr_bindings::material.perceptual_roughness; -#ifdef VERTEX_UVS - if ((pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_METALLIC_ROUGHNESS_TEXTURE_BIT) != 0u) { - let metallic_roughness = textureSampleBias(pbr_bindings::metallic_roughness_texture, pbr_bindings::metallic_roughness_sampler, uv, view.mip_bias); - // Sampling from GLTF standard channels for now - metallic = metallic * metallic_roughness.b; - perceptual_roughness = perceptual_roughness * metallic_roughness.g; - } -#endif - pbr_input.material.metallic = metallic; - pbr_input.material.perceptual_roughness = perceptual_roughness; - - // occlusion - // TODO: Split into diffuse/specular occlusion? 
- var occlusion: vec3 = vec3(1.0); -#ifdef VERTEX_UVS - if ((pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_OCCLUSION_TEXTURE_BIT) != 0u) { - occlusion = vec3(textureSampleBias(pbr_bindings::occlusion_texture, pbr_bindings::occlusion_sampler, uv, view.mip_bias).r); - } -#endif -#ifdef SCREEN_SPACE_AMBIENT_OCCLUSION - let ssao = textureLoad(screen_space_ambient_occlusion_texture, vec2(in.position.xy), 0i).r; - let ssao_multibounce = gtao_multibounce(ssao, pbr_input.material.base_color.rgb); - occlusion = min(occlusion, ssao_multibounce); -#endif - pbr_input.occlusion = occlusion; + // generate a PbrInput struct from the StandardMaterial bindings + var pbr_input = pbr_input_from_standard_material(in, is_front); - // world normal - pbr_input.world_normal = pbr_functions::prepare_world_normal( - in.world_normal, - (pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_DOUBLE_SIDED_BIT) != 0u, - is_front, - ); + // alpha discard + pbr_input.material.base_color = alpha_discard(pbr_input.material, pbr_input.material.base_color); - // N (normal vector) -#ifdef LOAD_PREPASS_NORMALS - pbr_input.N = bevy_pbr::prepass_utils::prepass_normal(in.position, 0u); +#ifdef PREPASS_PIPELINE + // write the gbuffer, lighting pass id, and optionally normal and motion_vector textures + let out = deferred_output(in, pbr_input); #else - pbr_input.N = pbr_functions::apply_normal_mapping( - pbr_bindings::material.flags, - pbr_input.world_normal, -#ifdef VERTEX_TANGENTS -#ifdef STANDARDMATERIAL_NORMAL_MAP - in.world_tangent, -#endif -#endif -#ifdef VERTEX_UVS - uv, -#endif - view.mip_bias, - ); -#endif - - // V (view vector) - pbr_input.V = V; - - } else { // if UNLIT_BIT != 0 -#ifdef DEFERRED_PREPASS - // in deferred mode, we need to fill some of the pbr input data even for unlit materials - // to pass through the gbuffer to the deferred lighting shader - pbr_input = pbr_types::pbr_input_new(); - pbr_input.flags = mesh[in.instance_index].flags; - pbr_input.material.flags = pbr_bindings::material.flags; - pbr_input.world_position = in.world_position; -#endif - } - - // apply alpha discard - // ------------------- - // note even though this is based on the unlit color, it must be done after all texture samples for uniform control flow - unlit_color = pbr_functions::alpha_discard(pbr_bindings::material, unlit_color); - pbr_input.material.base_color = unlit_color; - - // generate output - // --------------- -#ifdef DEFERRED_PREPASS - // write the gbuffer - out.deferred = deferred_gbuffer_from_pbr_input(pbr_input); - out.deferred_lighting_pass_id = pbr_bindings::material.deferred_lighting_pass_id; -#ifdef NORMAL_PREPASS - out.normal = vec4(in.world_normal * 0.5 + vec3(0.5), 1.0); -#endif -#ifdef MOTION_VECTOR_PREPASS - out.motion_vector = calculate_motion_vector(in.world_position, in.previous_world_position); -#endif // MOTION_VECTOR_PREPASS - -#else // DEFERRED_PREPASS - // in forward mode, we calculate the lit color immediately, and then apply some post-lighting effects here. 
// in deferred mode the lit color and these effects will be calculated in the deferred lighting shader - var output_color = unlit_color; - if ((pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_UNLIT_BIT) == 0u) { - output_color = pbr_functions::pbr(pbr_input); - } - - if (fog.mode != FOG_MODE_OFF && (pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_FOG_ENABLED_BIT) != 0u) { - output_color = pbr_functions::apply_fog(fog, output_color, in.world_position.xyz, view.world_position.xyz); + var out: FragmentOutput; + if (pbr_input.material.flags & STANDARD_MATERIAL_FLAGS_UNLIT_BIT) == 0u { + out.color = apply_pbr_lighting(pbr_input); + } else { + out.color = pbr_input.material.base_color; } -#ifdef TONEMAP_IN_SHADER - output_color = tone_mapping(output_color, view.color_grading); -#ifdef DEBAND_DITHER - var output_rgb = output_color.rgb; - output_rgb = powsafe(output_rgb, 1.0 / 2.2); - output_rgb = output_rgb + screen_space_dither(in.position.xy); - // This conversion back to linear space is required because our output texture format is - // SRGB; the GPU will assume our output is linear and will apply an SRGB conversion. - output_rgb = powsafe(output_rgb, 2.2); - output_color = vec4(output_rgb, output_color.a); -#endif + // apply in-shader post processing (fog, alpha-premultiply, and also tonemapping, debanding if the camera is non-hdr) + // note this does not include fullscreen postprocessing effects like bloom. + out.color = main_pass_post_lighting_processing(pbr_input, out.color); #endif -#ifdef PREMULTIPLY_ALPHA - output_color = pbr_functions::premultiply_alpha(pbr_bindings::material.flags, output_color); -#endif - - // write the final pixel color - out.color = output_color; - -#endif //DEFERRED_PREPASS return out; } diff --git a/crates/bevy_pbr/src/render/pbr_ambient.wgsl b/crates/bevy_pbr/src/render/pbr_ambient.wgsl index 28afd5588d3ec..7b174da35c9db 100644 --- a/crates/bevy_pbr/src/render/pbr_ambient.wgsl +++ b/crates/bevy_pbr/src/render/pbr_ambient.wgsl @@ -1,7 +1,9 @@ #define_import_path bevy_pbr::ambient -#import bevy_pbr::lighting EnvBRDFApprox, F_AB -#import bevy_pbr::mesh_view_bindings lights +#import bevy_pbr::{ + lighting::{EnvBRDFApprox, F_AB}, + mesh_view_bindings::lights, +} // A precomputed `NdotV` is provided because it is computed regardless, // but `world_normal` and the view vector `V` are provided separately for more advanced uses. @@ -18,5 +20,10 @@ fn ambient_light( let diffuse_ambient = EnvBRDFApprox(diffuse_color, F_AB(1.0, NdotV)); let specular_ambient = EnvBRDFApprox(specular_color, F_AB(perceptual_roughness, NdotV)); - return (diffuse_ambient + specular_ambient) * lights.ambient_color.rgb * occlusion; + // No real world material has specular values under 0.02, so we use this range as a + // "pre-baked specular occlusion" that extinguishes the fresnel term, for artistic control. 
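+    // (For scale: dot(specular_color, vec3(50.0 * 0.33)) is roughly 16.5 * (r + g + b), so it saturates to 1.0 at
+    // about 0.02 per channel and only attenuates near-zero specular colors.)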
+ // See: https://google.github.io/filament/Filament.html#specularocclusion + let specular_occlusion = saturate(dot(specular_color, vec3(50.0 * 0.33))); + + return (diffuse_ambient + specular_ambient * specular_occlusion) * lights.ambient_color.rgb * occlusion; } diff --git a/crates/bevy_pbr/src/render/pbr_bindings.wgsl b/crates/bevy_pbr/src/render/pbr_bindings.wgsl index 73a414f0c77d1..fc5cdd280c2b9 100644 --- a/crates/bevy_pbr/src/render/pbr_bindings.wgsl +++ b/crates/bevy_pbr/src/render/pbr_bindings.wgsl @@ -1,6 +1,6 @@ #define_import_path bevy_pbr::pbr_bindings -#import bevy_pbr::pbr_types StandardMaterial +#import bevy_pbr::pbr_types::StandardMaterial @group(1) @binding(0) var material: StandardMaterial; @group(1) @binding(1) var base_color_texture: texture_2d; diff --git a/crates/bevy_pbr/src/render/pbr_fragment.wgsl b/crates/bevy_pbr/src/render/pbr_fragment.wgsl new file mode 100644 index 0000000000000..47d4f6f6d883b --- /dev/null +++ b/crates/bevy_pbr/src/render/pbr_fragment.wgsl @@ -0,0 +1,165 @@ +#define_import_path bevy_pbr::pbr_fragment + +#import bevy_pbr::{ + pbr_functions, + pbr_bindings, + pbr_types, + prepass_utils, + mesh_bindings::mesh, + mesh_view_bindings::view, + parallax_mapping::parallaxed_uv, +} + +#ifdef SCREEN_SPACE_AMBIENT_OCCLUSION +#import bevy_pbr::mesh_view_bindings::screen_space_ambient_occlusion_texture +#import bevy_pbr::gtao_utils::gtao_multibounce +#endif + +#ifdef PREPASS_PIPELINE +#import bevy_pbr::prepass_io::VertexOutput +#else +#import bevy_pbr::forward_io::VertexOutput +#endif + +// prepare a basic PbrInput from the vertex stage output, mesh binding and view binding +fn pbr_input_from_vertex_output( + in: VertexOutput, + is_front: bool, + double_sided: bool, +) -> pbr_types::PbrInput { + var pbr_input: pbr_types::PbrInput = pbr_types::pbr_input_new(); + + pbr_input.flags = mesh[in.instance_index].flags; + pbr_input.is_orthographic = view.projection[3].w == 1.0; + pbr_input.V = pbr_functions::calculate_view(in.world_position, pbr_input.is_orthographic); + pbr_input.frag_coord = in.position; + pbr_input.world_position = in.world_position; + +#ifdef VERTEX_COLORS + pbr_input.material.base_color = in.color; +#endif + + pbr_input.world_normal = pbr_functions::prepare_world_normal( + in.world_normal, + double_sided, + is_front, + ); + +#ifdef LOAD_PREPASS_NORMALS + pbr_input.N = prepass_utils::prepass_normal(in.position, 0u); +#else + pbr_input.N = normalize(pbr_input.world_normal); +#endif + + return pbr_input; +} + +// Prepare a full PbrInput by sampling all textures to resolve +// the material members +fn pbr_input_from_standard_material( + in: VertexOutput, + is_front: bool, +) -> pbr_types::PbrInput { + let double_sided = (pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_DOUBLE_SIDED_BIT) != 0u; + + var pbr_input: pbr_types::PbrInput = pbr_input_from_vertex_output(in, is_front, double_sided); + pbr_input.material.flags = pbr_bindings::material.flags; + pbr_input.material.base_color *= pbr_bindings::material.base_color; + pbr_input.material.deferred_lighting_pass_id = pbr_bindings::material.deferred_lighting_pass_id; + +#ifdef VERTEX_UVS + var uv = in.uv; + +#ifdef VERTEX_TANGENTS + if ((pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_DEPTH_MAP_BIT) != 0u) { + let V = pbr_input.V; + let N = in.world_normal; + let T = in.world_tangent.xyz; + let B = in.world_tangent.w * cross(N, T); + // Transform V from fragment to camera in world space to tangent space. 
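+        // (T, B, N) are the world-space tangent, bitangent, and normal, so the dot products below simply
+        // re-express V in that tangent-space basis.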
+ let Vt = vec3(dot(V, T), dot(V, B), dot(V, N)); + uv = parallaxed_uv( + pbr_bindings::material.parallax_depth_scale, + pbr_bindings::material.max_parallax_layer_count, + pbr_bindings::material.max_relief_mapping_search_steps, + uv, + // Flip the direction of Vt to go toward the surface to make the + // parallax mapping algorithm easier to understand and reason + // about. + -Vt, + ); + } +#endif // VERTEX_TANGENTS + + if ((pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_BASE_COLOR_TEXTURE_BIT) != 0u) { + pbr_input.material.base_color *= textureSampleBias(pbr_bindings::base_color_texture, pbr_bindings::base_color_sampler, uv, view.mip_bias); + } +#endif // VERTEX_UVS + + pbr_input.material.flags = pbr_bindings::material.flags; + + // NOTE: Unlit bit not set means == 0 is true, so the true case is if lit + if ((pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_UNLIT_BIT) == 0u) { + + pbr_input.material.reflectance = pbr_bindings::material.reflectance; + pbr_input.material.alpha_cutoff = pbr_bindings::material.alpha_cutoff; + + // emissive + // TODO use .a for exposure compensation in HDR + var emissive: vec4 = pbr_bindings::material.emissive; +#ifdef VERTEX_UVS + if ((pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_EMISSIVE_TEXTURE_BIT) != 0u) { + emissive = vec4(emissive.rgb * textureSampleBias(pbr_bindings::emissive_texture, pbr_bindings::emissive_sampler, uv, view.mip_bias).rgb, 1.0); + } +#endif + pbr_input.material.emissive = emissive; + + // metallic and perceptual roughness + var metallic: f32 = pbr_bindings::material.metallic; + var perceptual_roughness: f32 = pbr_bindings::material.perceptual_roughness; +#ifdef VERTEX_UVS + if ((pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_METALLIC_ROUGHNESS_TEXTURE_BIT) != 0u) { + let metallic_roughness = textureSampleBias(pbr_bindings::metallic_roughness_texture, pbr_bindings::metallic_roughness_sampler, uv, view.mip_bias); + // Sampling from GLTF standard channels for now + metallic *= metallic_roughness.b; + perceptual_roughness *= metallic_roughness.g; + } +#endif + pbr_input.material.metallic = metallic; + pbr_input.material.perceptual_roughness = perceptual_roughness; + + // occlusion + // TODO: Split into diffuse/specular occlusion? 
+ var occlusion: vec3 = vec3(1.0); +#ifdef VERTEX_UVS + if ((pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_OCCLUSION_TEXTURE_BIT) != 0u) { + occlusion = vec3(textureSampleBias(pbr_bindings::occlusion_texture, pbr_bindings::occlusion_sampler, uv, view.mip_bias).r); + } +#endif +#ifdef SCREEN_SPACE_AMBIENT_OCCLUSION + let ssao = textureLoad(screen_space_ambient_occlusion_texture, vec2(in.position.xy), 0i).r; + let ssao_multibounce = gtao_multibounce(ssao, pbr_input.material.base_color.rgb); + occlusion = min(occlusion, ssao_multibounce); +#endif + pbr_input.occlusion = occlusion; + + // N (normal vector) +#ifndef LOAD_PREPASS_NORMALS + pbr_input.N = pbr_functions::apply_normal_mapping( + pbr_bindings::material.flags, + pbr_input.world_normal, +#ifdef VERTEX_TANGENTS +#ifdef STANDARDMATERIAL_NORMAL_MAP + in.world_tangent, +#endif +#endif +#ifdef VERTEX_UVS + uv, +#endif + view.mip_bias, + ); +#endif + } + + return pbr_input; +} diff --git a/crates/bevy_pbr/src/render/pbr_functions.wgsl b/crates/bevy_pbr/src/render/pbr_functions.wgsl index e587f242a1437..9979f75be0d3a 100644 --- a/crates/bevy_pbr/src/render/pbr_functions.wgsl +++ b/crates/bevy_pbr/src/render/pbr_functions.wgsl @@ -1,23 +1,23 @@ #define_import_path bevy_pbr::pbr_functions -#ifdef TONEMAP_IN_SHADER -#import bevy_core_pipeline::tonemapping -#endif +#import bevy_pbr::{ + pbr_types, + pbr_bindings, + mesh_view_bindings as view_bindings, + mesh_view_types, + lighting, + clustered_forward as clustering, + shadows, + ambient, + mesh_types::MESH_FLAGS_SHADOW_RECEIVER_BIT, +} -#import bevy_pbr::pbr_types as pbr_types -#import bevy_pbr::pbr_bindings as pbr_bindings -#import bevy_pbr::mesh_view_bindings as view_bindings -#import bevy_pbr::mesh_view_types as mesh_view_types -#import bevy_pbr::lighting as lighting -#import bevy_pbr::clustered_forward as clustering -#import bevy_pbr::shadows as shadows -#import bevy_pbr::fog as fog -#import bevy_pbr::ambient as ambient #ifdef ENVIRONMENT_MAP #import bevy_pbr::environment_map #endif -#import bevy_pbr::mesh_types MESH_FLAGS_SHADOW_RECEIVER_BIT +#import bevy_core_pipeline::tonemapping::{screen_space_dither, powsafe, tone_mapping} + fn alpha_discard(material: pbr_types::StandardMaterial, output_color: vec4) -> vec4 { var color = output_color; @@ -137,7 +137,7 @@ fn calculate_view( } #ifndef PREPASS_FRAGMENT -fn pbr( +fn apply_pbr_lighting( in: pbr_types::PbrInput, ) -> vec4 { var output_color: vec4 = in.material.base_color; @@ -223,7 +223,7 @@ fn pbr( // Environment map light (indirect) #ifdef ENVIRONMENT_MAP - let environment_light = bevy_pbr::environment_map::environment_map_light(perceptual_roughness, roughness, diffuse_color, NdotV, f_ab, in.N, R, F0); + let environment_light = environment_map::environment_map_light(perceptual_roughness, roughness, diffuse_color, NdotV, f_ab, in.N, R, F0); indirect_light += (environment_light.diffuse * occlusion) + environment_light.specular; #endif @@ -247,7 +247,6 @@ fn pbr( } #endif // PREPASS_FRAGMENT -#ifndef PREPASS_FRAGMENT fn apply_fog(fog_params: mesh_view_types::Fog, input_color: vec4, fragment_world_position: vec3, view_world_position: vec3) -> vec4 { let view_to_world = fragment_world_position.xyz - view_world_position.xyz; @@ -274,18 +273,17 @@ fn apply_fog(fog_params: mesh_view_types::Fog, input_color: vec4, fragment_ } if fog_params.mode == mesh_view_types::FOG_MODE_LINEAR { - return fog::linear_fog(fog_params, input_color, distance, scattering); + return bevy_pbr::fog::linear_fog(fog_params, input_color, distance, 
scattering); } else if fog_params.mode == mesh_view_types::FOG_MODE_EXPONENTIAL { - return fog::exponential_fog(fog_params, input_color, distance, scattering); + return bevy_pbr::fog::exponential_fog(fog_params, input_color, distance, scattering); } else if fog_params.mode == mesh_view_types::FOG_MODE_EXPONENTIAL_SQUARED { - return fog::exponential_squared_fog(fog_params, input_color, distance, scattering); + return bevy_pbr::fog::exponential_squared_fog(fog_params, input_color, distance, scattering); } else if fog_params.mode == mesh_view_types::FOG_MODE_ATMOSPHERIC { - return fog::atmospheric_fog(fog_params, input_color, distance, scattering); + return bevy_pbr::fog::atmospheric_fog(fog_params, input_color, distance, scattering); } else { return input_color; } } -#endif // PREPASS_FRAGMENT #ifdef PREMULTIPLY_ALPHA fn premultiply_alpha(standard_material_flags: u32, color: vec4) -> vec4 { @@ -338,3 +336,34 @@ fn premultiply_alpha(standard_material_flags: u32, color: vec4) -> vec4, +) -> vec4 { + var output_color = input_color; + + // fog + if (view_bindings::fog.mode != mesh_view_types::FOG_MODE_OFF && (pbr_input.material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_FOG_ENABLED_BIT) != 0u) { + output_color = apply_fog(view_bindings::fog, output_color, pbr_input.world_position.xyz, view_bindings::view.world_position.xyz); + } + +#ifdef TONEMAP_IN_SHADER + output_color = tone_mapping(output_color, view_bindings::view.color_grading); +#ifdef DEBAND_DITHER + var output_rgb = output_color.rgb; + output_rgb = powsafe(output_rgb, 1.0 / 2.2); + output_rgb += screen_space_dither(pbr_input.frag_coord.xy); + // This conversion back to linear space is required because our output texture format is + // SRGB; the GPU will assume our output is linear and will apply an SRGB conversion. 
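+    // (The dither is added in this approximate gamma (1/2.2) space so the noise amplitude stays roughly
+    // perceptually uniform across brightness levels before converting back to linear below.)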
+ output_rgb = powsafe(output_rgb, 2.2); + output_color = vec4(output_rgb, output_color.a); +#endif +#endif +#ifdef PREMULTIPLY_ALPHA + output_color = premultiply_alpha(pbr_input.material.flags, output_color); +#endif + return output_color; +} diff --git a/crates/bevy_pbr/src/render/pbr_lighting.wgsl b/crates/bevy_pbr/src/render/pbr_lighting.wgsl index 6658c43034060..bc279ca594ad3 100644 --- a/crates/bevy_pbr/src/render/pbr_lighting.wgsl +++ b/crates/bevy_pbr/src/render/pbr_lighting.wgsl @@ -1,8 +1,10 @@ #define_import_path bevy_pbr::lighting -#import bevy_pbr::utils PI -#import bevy_pbr::mesh_view_types as view_types -#import bevy_pbr::mesh_view_bindings as view_bindings +#import bevy_pbr::{ + utils::PI, + mesh_view_types::POINT_LIGHT_FLAGS_SPOT_LIGHT_Y_NEGATIVE, + mesh_view_bindings as view_bindings, +} // From the Filament design doc // https://google.github.io/filament/Filament.html#table_symbols @@ -253,7 +255,7 @@ fn spot_light( // reconstruct spot dir from x/z and y-direction flag var spot_dir = vec3((*light).light_custom_data.x, 0.0, (*light).light_custom_data.y); spot_dir.y = sqrt(max(0.0, 1.0 - spot_dir.x * spot_dir.x - spot_dir.z * spot_dir.z)); - if ((*light).flags & view_types::POINT_LIGHT_FLAGS_SPOT_LIGHT_Y_NEGATIVE) != 0u { + if ((*light).flags & POINT_LIGHT_FLAGS_SPOT_LIGHT_Y_NEGATIVE) != 0u { spot_dir.y = -spot_dir.y; } let light_to_frag = (*light).position_radius.xyz - world_position.xyz; diff --git a/crates/bevy_pbr/src/render/pbr_prepass.wgsl b/crates/bevy_pbr/src/render/pbr_prepass.wgsl index 479d64ce236c3..5af33bfc3e469 100644 --- a/crates/bevy_pbr/src/render/pbr_prepass.wgsl +++ b/crates/bevy_pbr/src/render/pbr_prepass.wgsl @@ -1,12 +1,11 @@ -#import bevy_pbr::pbr_prepass_functions -#import bevy_pbr::pbr_bindings -#import bevy_pbr::pbr_types -#ifdef NORMAL_PREPASS -#import bevy_pbr::pbr_functions -#endif // NORMAL_PREPASS - -#import bevy_pbr::prepass_io as prepass_io -#import bevy_pbr::mesh_view_bindings view +#import bevy_pbr::{ + pbr_prepass_functions, + pbr_bindings::material, + pbr_types, + pbr_functions, + prepass_io, + mesh_view_bindings::view, +} #ifdef PREPASS_FRAGMENT @fragment @@ -14,7 +13,7 @@ fn fragment( in: prepass_io::VertexOutput, @builtin(front_facing) is_front: bool, ) -> prepass_io::FragmentOutput { - bevy_pbr::pbr_prepass_functions::prepass_alpha_discard(in); + pbr_prepass_functions::prepass_alpha_discard(in); var out: prepass_io::FragmentOutput; @@ -24,15 +23,15 @@ fn fragment( #ifdef NORMAL_PREPASS // NOTE: Unlit bit not set means == 0 is true, so the true case is if lit - if (bevy_pbr::pbr_bindings::material.flags & bevy_pbr::pbr_types::STANDARD_MATERIAL_FLAGS_UNLIT_BIT) == 0u { - let world_normal = bevy_pbr::pbr_functions::prepare_world_normal( + if (material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_UNLIT_BIT) == 0u { + let world_normal = pbr_functions::prepare_world_normal( in.world_normal, - (bevy_pbr::pbr_bindings::material.flags & bevy_pbr::pbr_types::STANDARD_MATERIAL_FLAGS_DOUBLE_SIDED_BIT) != 0u, + (material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_DOUBLE_SIDED_BIT) != 0u, is_front, ); - let normal = bevy_pbr::pbr_functions::apply_normal_mapping( - bevy_pbr::pbr_bindings::material.flags, + let normal = pbr_functions::apply_normal_mapping( + material.flags, world_normal, #ifdef VERTEX_TANGENTS #ifdef STANDARDMATERIAL_NORMAL_MAP @@ -52,7 +51,7 @@ fn fragment( #endif // NORMAL_PREPASS #ifdef MOTION_VECTOR_PREPASS - out.motion_vector = bevy_pbr::pbr_prepass_functions::calculate_motion_vector(in.world_position, 
in.previous_world_position); + out.motion_vector = pbr_prepass_functions::calculate_motion_vector(in.world_position, in.previous_world_position); #endif return out; @@ -60,6 +59,6 @@ fn fragment( #else @fragment fn fragment(in: prepass_io::VertexOutput) { - bevy_pbr::pbr_prepass_functions::prepass_alpha_discard(in); + pbr_prepass_functions::prepass_alpha_discard(in); } #endif // PREPASS_FRAGMENT diff --git a/crates/bevy_pbr/src/render/pbr_prepass_functions.wgsl b/crates/bevy_pbr/src/render/pbr_prepass_functions.wgsl index 50afd8380c2a4..176c56aa1aba7 100644 --- a/crates/bevy_pbr/src/render/pbr_prepass_functions.wgsl +++ b/crates/bevy_pbr/src/render/pbr_prepass_functions.wgsl @@ -1,11 +1,12 @@ #define_import_path bevy_pbr::pbr_prepass_functions -#import bevy_pbr::prepass_io VertexOutput -#import bevy_pbr::prepass_bindings previous_view_proj -#import bevy_pbr::mesh_view_bindings view - -#import bevy_pbr::pbr_bindings as pbr_bindings -#import bevy_pbr::pbr_types as pbr_types +#import bevy_pbr::{ + prepass_io::VertexOutput, + prepass_bindings::previous_view_proj, + mesh_view_bindings::view, + pbr_bindings, + pbr_types, +} // Cutoff used for the premultiplied alpha modes BLEND and ADD. const PREMULTIPLIED_ALPHA_CUTOFF = 0.05; diff --git a/crates/bevy_pbr/src/render/shadow_sampling.wgsl b/crates/bevy_pbr/src/render/shadow_sampling.wgsl index 04d8920307a6c..0a93d5468b06b 100644 --- a/crates/bevy_pbr/src/render/shadow_sampling.wgsl +++ b/crates/bevy_pbr/src/render/shadow_sampling.wgsl @@ -1,7 +1,9 @@ #define_import_path bevy_pbr::shadow_sampling -#import bevy_pbr::mesh_view_bindings as view_bindings -#import bevy_pbr::utils PI +#import bevy_pbr::{ + mesh_view_bindings as view_bindings, + utils::PI, +} // Do the lookup, using HW 2x2 PCF and comparison fn sample_shadow_map_hardware(light_local: vec2, depth: f32, array_index: i32) -> f32 { diff --git a/crates/bevy_pbr/src/render/shadows.wgsl b/crates/bevy_pbr/src/render/shadows.wgsl index 9ace738252ea6..4ccbd5f850142 100644 --- a/crates/bevy_pbr/src/render/shadows.wgsl +++ b/crates/bevy_pbr/src/render/shadows.wgsl @@ -1,9 +1,11 @@ #define_import_path bevy_pbr::shadows -#import bevy_pbr::mesh_view_types POINT_LIGHT_FLAGS_SPOT_LIGHT_Y_NEGATIVE -#import bevy_pbr::mesh_view_bindings as view_bindings -#import bevy_pbr::utils hsv2rgb -#import bevy_pbr::shadow_sampling sample_shadow_map +#import bevy_pbr::{ + mesh_view_types::POINT_LIGHT_FLAGS_SPOT_LIGHT_Y_NEGATIVE, + mesh_view_bindings as view_bindings, + utils::hsv2rgb, + shadow_sampling::sample_shadow_map +} const flip_z: vec3 = vec3(1.0, 1.0, -1.0); diff --git a/crates/bevy_pbr/src/render/skinning.wgsl b/crates/bevy_pbr/src/render/skinning.wgsl index 24678619a34c1..3f23629d1d4df 100644 --- a/crates/bevy_pbr/src/render/skinning.wgsl +++ b/crates/bevy_pbr/src/render/skinning.wgsl @@ -1,6 +1,6 @@ #define_import_path bevy_pbr::skinning -#import bevy_pbr::mesh_types SkinnedMesh +#import bevy_pbr::mesh_types::SkinnedMesh #ifdef SKINNED diff --git a/crates/bevy_pbr/src/render/wireframe.wgsl b/crates/bevy_pbr/src/render/wireframe.wgsl index b15b25175e565..ed5c24b4b21f3 100644 --- a/crates/bevy_pbr/src/render/wireframe.wgsl +++ b/crates/bevy_pbr/src/render/wireframe.wgsl @@ -1,4 +1,5 @@ -#import bevy_pbr::forward_io VertexOutput +#import bevy_pbr::forward_io::VertexOutput + struct WireframeMaterial { color: vec4, }; diff --git a/crates/bevy_pbr/src/ssao/gtao.wgsl b/crates/bevy_pbr/src/ssao/gtao.wgsl index 67f140a98866f..075612fd508f5 100644 --- a/crates/bevy_pbr/src/ssao/gtao.wgsl +++ 
b/crates/bevy_pbr/src/ssao/gtao.wgsl @@ -5,10 +5,14 @@ // Source code heavily based on XeGTAO v1.30 from Intel // https://github.com/GameTechDev/XeGTAO/blob/0d177ce06bfa642f64d8af4de1197ad1bcb862d4/Source/Rendering/Shaders/XeGTAO.hlsli -#import bevy_pbr::gtao_utils fast_acos -#import bevy_pbr::utils PI, HALF_PI -#import bevy_render::view View -#import bevy_render::globals Globals +#import bevy_pbr::{ + gtao_utils::fast_acos, + utils::{PI, HALF_PI}, +} +#import bevy_render::{ + view::View, + globals::Globals, +} @group(0) @binding(0) var preprocessed_depth: texture_2d; @group(0) @binding(1) var normals: texture_2d; diff --git a/crates/bevy_pbr/src/ssao/gtao_utils.wgsl b/crates/bevy_pbr/src/ssao/gtao_utils.wgsl index 11233ba05226b..f081393edb395 100644 --- a/crates/bevy_pbr/src/ssao/gtao_utils.wgsl +++ b/crates/bevy_pbr/src/ssao/gtao_utils.wgsl @@ -1,6 +1,6 @@ #define_import_path bevy_pbr::gtao_utils -#import bevy_pbr::utils PI, HALF_PI +#import bevy_pbr::utils::{PI, HALF_PI} // Approximates single-bounce ambient occlusion to multi-bounce ambient occlusion // https://blog.selfshadow.com/publications/s2016-shading-course/activision/s2016_pbs_activision_occlusion.pdf#page=78 diff --git a/crates/bevy_pbr/src/ssao/mod.rs b/crates/bevy_pbr/src/ssao/mod.rs index eaabea3772654..59fb57cfac3c6 100644 --- a/crates/bevy_pbr/src/ssao/mod.rs +++ b/crates/bevy_pbr/src/ssao/mod.rs @@ -21,12 +21,11 @@ use bevy_render::{ prelude::Camera, render_graph::{NodeRunError, RenderGraphApp, RenderGraphContext, ViewNode, ViewNodeRunner}, render_resource::{ - AddressMode, BindGroup, BindGroupDescriptor, BindGroupEntry, BindGroupLayout, - BindGroupLayoutDescriptor, BindGroupLayoutEntry, BindingResource, BindingType, - BufferBindingType, CachedComputePipelineId, ComputePassDescriptor, - ComputePipelineDescriptor, Extent3d, FilterMode, PipelineCache, Sampler, - SamplerBindingType, SamplerDescriptor, Shader, ShaderDefVal, ShaderStages, ShaderType, - SpecializedComputePipeline, SpecializedComputePipelines, StorageTextureAccess, + AddressMode, BindGroup, BindGroupEntries, BindGroupLayout, BindGroupLayoutDescriptor, + BindGroupLayoutEntry, BindingType, BufferBindingType, CachedComputePipelineId, + ComputePassDescriptor, ComputePipelineDescriptor, Extent3d, FilterMode, PipelineCache, + Sampler, SamplerBindingType, SamplerDescriptor, Shader, ShaderDefVal, ShaderStages, + ShaderType, SpecializedComputePipeline, SpecializedComputePipelines, StorageTextureAccess, TextureDescriptor, TextureDimension, TextureFormat, TextureSampleType, TextureUsages, TextureView, TextureViewDescriptor, TextureViewDimension, }, @@ -776,171 +775,63 @@ fn prepare_ssao_bind_groups( }; for (entity, ssao_textures, prepass_textures) in &views { - let common_bind_group = render_device.create_bind_group(&BindGroupDescriptor { - label: Some("ssao_common_bind_group"), - layout: &pipelines.common_bind_group_layout, - entries: &[ - BindGroupEntry { - binding: 0, - resource: BindingResource::Sampler(&pipelines.point_clamp_sampler), - }, - BindGroupEntry { - binding: 1, - resource: view_uniforms.clone(), - }, - ], - }); + let common_bind_group = render_device.create_bind_group( + "ssao_common_bind_group", + &pipelines.common_bind_group_layout, + &BindGroupEntries::sequential((&pipelines.point_clamp_sampler, view_uniforms.clone())), + ); - let preprocess_depth_mip_view_descriptor = TextureViewDescriptor { - format: Some(TextureFormat::R16Float), - dimension: Some(TextureViewDimension::D2), - mip_level_count: Some(1), - ..default() + let create_depth_view = 
|mip_level| { + ssao_textures + .preprocessed_depth_texture + .texture + .create_view(&TextureViewDescriptor { + label: Some("ssao_preprocessed_depth_texture_mip_view"), + base_mip_level: mip_level, + format: Some(TextureFormat::R16Float), + dimension: Some(TextureViewDimension::D2), + mip_level_count: Some(1), + ..default() + }) }; - let preprocess_depth_bind_group = render_device.create_bind_group(&BindGroupDescriptor { - label: Some("ssao_preprocess_depth_bind_group"), - layout: &pipelines.preprocess_depth_bind_group_layout, - entries: &[ - BindGroupEntry { - binding: 0, - resource: BindingResource::TextureView( - &prepass_textures.depth.as_ref().unwrap().default_view, - ), - }, - BindGroupEntry { - binding: 1, - resource: BindingResource::TextureView( - &ssao_textures - .preprocessed_depth_texture - .texture - .create_view(&TextureViewDescriptor { - label: Some("ssao_preprocessed_depth_texture_mip_view_0"), - base_mip_level: 0, - ..preprocess_depth_mip_view_descriptor - }), - ), - }, - BindGroupEntry { - binding: 2, - resource: BindingResource::TextureView( - &ssao_textures - .preprocessed_depth_texture - .texture - .create_view(&TextureViewDescriptor { - label: Some("ssao_preprocessed_depth_texture_mip_view_1"), - base_mip_level: 1, - ..preprocess_depth_mip_view_descriptor - }), - ), - }, - BindGroupEntry { - binding: 3, - resource: BindingResource::TextureView( - &ssao_textures - .preprocessed_depth_texture - .texture - .create_view(&TextureViewDescriptor { - label: Some("ssao_preprocessed_depth_texture_mip_view_2"), - base_mip_level: 2, - ..preprocess_depth_mip_view_descriptor - }), - ), - }, - BindGroupEntry { - binding: 4, - resource: BindingResource::TextureView( - &ssao_textures - .preprocessed_depth_texture - .texture - .create_view(&TextureViewDescriptor { - label: Some("ssao_preprocessed_depth_texture_mip_view_3"), - base_mip_level: 3, - ..preprocess_depth_mip_view_descriptor - }), - ), - }, - BindGroupEntry { - binding: 5, - resource: BindingResource::TextureView( - &ssao_textures - .preprocessed_depth_texture - .texture - .create_view(&TextureViewDescriptor { - label: Some("ssao_preprocessed_depth_texture_mip_view_4"), - base_mip_level: 4, - ..preprocess_depth_mip_view_descriptor - }), - ), - }, - ], - }); + let preprocess_depth_bind_group = render_device.create_bind_group( + "ssao_preprocess_depth_bind_group", + &pipelines.preprocess_depth_bind_group_layout, + &BindGroupEntries::sequential(( + &prepass_textures.depth.as_ref().unwrap().default_view, + &create_depth_view(0), + &create_depth_view(1), + &create_depth_view(2), + &create_depth_view(3), + &create_depth_view(4), + )), + ); - let gtao_bind_group = render_device.create_bind_group(&BindGroupDescriptor { - label: Some("ssao_gtao_bind_group"), - layout: &pipelines.gtao_bind_group_layout, - entries: &[ - BindGroupEntry { - binding: 0, - resource: BindingResource::TextureView( - &ssao_textures.preprocessed_depth_texture.default_view, - ), - }, - BindGroupEntry { - binding: 1, - resource: BindingResource::TextureView( - &prepass_textures.normal.as_ref().unwrap().default_view, - ), - }, - BindGroupEntry { - binding: 2, - resource: BindingResource::TextureView(&pipelines.hilbert_index_lut), - }, - BindGroupEntry { - binding: 3, - resource: BindingResource::TextureView( - &ssao_textures.ssao_noisy_texture.default_view, - ), - }, - BindGroupEntry { - binding: 4, - resource: BindingResource::TextureView( - &ssao_textures.depth_differences_texture.default_view, - ), - }, - BindGroupEntry { - binding: 5, - resource: 
globals_uniforms.clone(), - }, - ], - }); + let gtao_bind_group = render_device.create_bind_group( + "ssao_gtao_bind_group", + &pipelines.gtao_bind_group_layout, + &BindGroupEntries::sequential(( + &ssao_textures.preprocessed_depth_texture.default_view, + &prepass_textures.normal.as_ref().unwrap().default_view, + &pipelines.hilbert_index_lut, + &ssao_textures.ssao_noisy_texture.default_view, + &ssao_textures.depth_differences_texture.default_view, + globals_uniforms.clone(), + )), + ); - let spatial_denoise_bind_group = render_device.create_bind_group(&BindGroupDescriptor { - label: Some("ssao_spatial_denoise_bind_group"), - layout: &pipelines.spatial_denoise_bind_group_layout, - entries: &[ - BindGroupEntry { - binding: 0, - resource: BindingResource::TextureView( - &ssao_textures.ssao_noisy_texture.default_view, - ), - }, - BindGroupEntry { - binding: 1, - resource: BindingResource::TextureView( - &ssao_textures.depth_differences_texture.default_view, - ), - }, - BindGroupEntry { - binding: 2, - resource: BindingResource::TextureView( - &ssao_textures - .screen_space_ambient_occlusion_texture - .default_view, - ), - }, - ], - }); + let spatial_denoise_bind_group = render_device.create_bind_group( + "ssao_spatial_denoise_bind_group", + &pipelines.spatial_denoise_bind_group_layout, + &BindGroupEntries::sequential(( + &ssao_textures.ssao_noisy_texture.default_view, + &ssao_textures.depth_differences_texture.default_view, + &ssao_textures + .screen_space_ambient_occlusion_texture + .default_view, + )), + ); commands.entity(entity).insert(SsaoBindGroups { common_bind_group, diff --git a/crates/bevy_pbr/src/ssao/preprocess_depth.wgsl b/crates/bevy_pbr/src/ssao/preprocess_depth.wgsl index d977148609d53..73dccaa02c09a 100644 --- a/crates/bevy_pbr/src/ssao/preprocess_depth.wgsl +++ b/crates/bevy_pbr/src/ssao/preprocess_depth.wgsl @@ -5,7 +5,7 @@ // Reference: https://research.nvidia.com/sites/default/files/pubs/2012-06_Scalable-Ambient-Obscurance/McGuire12SAO.pdf, section 2.2 -#import bevy_render::view View +#import bevy_render::view::View @group(0) @binding(0) var input_depth: texture_depth_2d; @group(0) @binding(1) var preprocessed_depth_mip0: texture_storage_2d; diff --git a/crates/bevy_pbr/src/ssao/spatial_denoise.wgsl b/crates/bevy_pbr/src/ssao/spatial_denoise.wgsl index 4cfe4cd350871..2448db309fce7 100644 --- a/crates/bevy_pbr/src/ssao/spatial_denoise.wgsl +++ b/crates/bevy_pbr/src/ssao/spatial_denoise.wgsl @@ -9,7 +9,7 @@ // XeGTAO does a 3x3 filter, on two pixels at a time per compute thread, applied twice // We do a 3x3 filter, on 1 pixel per compute thread, applied once -#import bevy_render::view View +#import bevy_render::view::View @group(0) @binding(0) var ambient_occlusion_noisy: texture_2d; @group(0) @binding(1) var depth_differences: texture_2d; diff --git a/crates/bevy_ptr/README.md b/crates/bevy_ptr/README.md index a08c3043ec391..c78a6ac544635 100644 --- a/crates/bevy_ptr/README.md +++ b/crates/bevy_ptr/README.md @@ -1,4 +1,10 @@ -# `bevy_ptr` +# Bevy Ptr + +[![License](https://img.shields.io/badge/license-MIT%2FApache-blue.svg)](https://github.com/bevyengine/bevy#license) +[![Crates.io](https://img.shields.io/crates/v/bevy_ptr.svg)](https://crates.io/crates/bevy_ptr) +[![Downloads](https://img.shields.io/crates/d/bevy_ptr.svg)](https://crates.io/crates/bevy_ptr) +[![Docs](https://docs.rs/bevy_ptr/badge.svg)](https://docs.rs/bevy_ptr/latest/bevy_ptr/) 
+[![Discord](https://img.shields.io/discord/691052431525675048.svg?label=&logo=discord&logoColor=ffffff&color=7389D8&labelColor=6A7EC2)](https://discord.gg/bevy) The `bevy_ptr` crate provides low-level abstractions for working with pointers in a more safe way than using rust's raw pointers. diff --git a/crates/bevy_reflect/README.md b/crates/bevy_reflect/README.md index dba4b0f2430f8..4289f869fff91 100644 --- a/crates/bevy_reflect/README.md +++ b/crates/bevy_reflect/README.md @@ -1,5 +1,11 @@ # Bevy Reflect +[![License](https://img.shields.io/badge/license-MIT%2FApache-blue.svg)](https://github.com/bevyengine/bevy#license) +[![Crates.io](https://img.shields.io/crates/v/bevy.svg)](https://crates.io/crates/bevy_reflect) +[![Downloads](https://img.shields.io/crates/d/bevy_reflect.svg)](https://crates.io/crates/bevy_reflect) +[![Docs](https://docs.rs/bevy_reflect/badge.svg)](https://docs.rs/bevy_reflect/latest/bevy_reflect/) +[![Discord](https://img.shields.io/discord/691052431525675048.svg?label=&logo=discord&logoColor=ffffff&color=7389D8&labelColor=6A7EC2)](https://discord.gg/bevy) + This crate enables you to dynamically interact with Rust types: * Derive the Reflect traits diff --git a/crates/bevy_reflect/bevy_reflect_derive/Cargo.toml b/crates/bevy_reflect/bevy_reflect_derive/Cargo.toml index 5073873b638c5..bfb239e8105c5 100644 --- a/crates/bevy_reflect/bevy_reflect_derive/Cargo.toml +++ b/crates/bevy_reflect/bevy_reflect_derive/Cargo.toml @@ -23,4 +23,3 @@ syn = { version = "2.0", features = ["full"] } proc-macro2 = "1.0" quote = "1.0" uuid = { version = "1.1", features = ["v4"] } -bit-set = "0.5.2" diff --git a/crates/bevy_reflect/bevy_reflect_derive/src/derive_data.rs b/crates/bevy_reflect/bevy_reflect_derive/src/derive_data.rs index 0e5ef21dcaf69..59b3e2dd08a30 100644 --- a/crates/bevy_reflect/bevy_reflect_derive/src/derive_data.rs +++ b/crates/bevy_reflect/bevy_reflect_derive/src/derive_data.rs @@ -1,11 +1,11 @@ use crate::container_attributes::{FromReflectAttrs, ReflectTraits}; use crate::field_attributes::{parse_field_attrs, ReflectFieldAttr}; use crate::type_path::parse_path_no_leading_colon; -use crate::utility::{members_to_serialization_denylist, StringExpr, WhereClauseOptions}; -use bit_set::BitSet; +use crate::utility::{StringExpr, WhereClauseOptions}; use quote::{quote, ToTokens}; use syn::token::Comma; +use crate::serialization::SerializationDataDef; use crate::{ utility, REFLECT_ATTRIBUTE_NAME, REFLECT_VALUE_ATTRIBUTE_NAME, TYPE_NAME_ATTRIBUTE_NAME, TYPE_PATH_ATTRIBUTE_NAME, @@ -65,7 +65,7 @@ pub(crate) struct ReflectMeta<'a> { /// ``` pub(crate) struct ReflectStruct<'a> { meta: ReflectMeta<'a>, - serialization_denylist: BitSet, + serialization_data: Option, fields: Vec>, } @@ -95,7 +95,14 @@ pub(crate) struct StructField<'a> { /// The reflection-based attributes on the field. pub attrs: ReflectFieldAttr, /// The index of this field within the struct. - pub index: usize, + pub declaration_index: usize, + /// The index of this field as seen by the reflection API. + /// + /// This index accounts for the removal of [ignored] fields. + /// It will only be `Some(index)` when the field is not ignored. 
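+    ///
+    /// For example (illustrative): in `struct Foo { a: u32, #[reflect(ignore)] b: u32, c: u32 }`,
+    /// `c` has a `declaration_index` of 2 but a `reflection_index` of `Some(1)`.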
+ /// + /// [ignored]: crate::field_attributes::ReflectIgnoreBehavior::IgnoreAlways + pub reflection_index: Option, /// The documentation for this field, if any #[cfg(feature = "documentation")] pub doc: crate::documentation::Documentation, @@ -272,9 +279,7 @@ impl<'a> ReflectDerive<'a> { let fields = Self::collect_struct_fields(&data.fields)?; let reflect_struct = ReflectStruct { meta, - serialization_denylist: members_to_serialization_denylist( - fields.iter().map(|v| v.attrs.ignore), - ), + serialization_data: SerializationDataDef::new(&fields)?, fields, }; @@ -308,19 +313,31 @@ impl<'a> ReflectDerive<'a> { } fn collect_struct_fields(fields: &'a Fields) -> Result>, syn::Error> { + let mut active_index = 0; let sifter: utility::ResultSifter> = fields .iter() .enumerate() - .map(|(index, field)| -> Result { - let attrs = parse_field_attrs(&field.attrs)?; - Ok(StructField { - index, - attrs, - data: field, - #[cfg(feature = "documentation")] - doc: crate::documentation::Documentation::from_attributes(&field.attrs), - }) - }) + .map( + |(declaration_index, field)| -> Result { + let attrs = parse_field_attrs(&field.attrs)?; + + let reflection_index = if attrs.ignore.is_ignored() { + None + } else { + active_index += 1; + Some(active_index - 1) + }; + + Ok(StructField { + declaration_index, + reflection_index, + attrs, + data: field, + #[cfg(feature = "documentation")] + doc: crate::documentation::Documentation::from_attributes(&field.attrs), + }) + }, + ) .fold( utility::ResultSifter::default(), utility::ResultSifter::fold, @@ -420,12 +437,9 @@ impl<'a> ReflectStruct<'a> { &self.meta } - /// Access the data about which fields should be ignored during serialization. - /// - /// The returned bitset is a collection of indices obtained from the [`members_to_serialization_denylist`] function. - #[allow(dead_code)] - pub fn serialization_denylist(&self) -> &BitSet { - &self.serialization_denylist + /// Returns the [`SerializationDataDef`] for this struct. + pub fn serialization_data(&self) -> Option<&SerializationDataDef> { + self.serialization_data.as_ref() } /// Returns the `GetTypeRegistration` impl as a `TokenStream`. @@ -438,7 +452,7 @@ impl<'a> ReflectStruct<'a> { crate::registration::impl_get_type_registration( self.meta(), where_clause_options, - Some(&self.serialization_denylist), + self.serialization_data(), ) } diff --git a/crates/bevy_reflect/bevy_reflect_derive/src/from_reflect.rs b/crates/bevy_reflect/bevy_reflect_derive/src/from_reflect.rs index 69525bd759210..bca7162de8b2d 100644 --- a/crates/bevy_reflect/bevy_reflect_derive/src/from_reflect.rs +++ b/crates/bevy_reflect/bevy_reflect_derive/src/from_reflect.rs @@ -189,7 +189,7 @@ fn get_ignored_fields(reflect_struct: &ReflectStruct) -> MemberValuePair { reflect_struct .ignored_fields() .map(|field| { - let member = ident_or_index(field.data.ident.as_ref(), field.index); + let member = ident_or_index(field.data.ident.as_ref(), field.declaration_index); let value = match &field.attrs.default { DefaultBehavior::Func(path) => quote! {#path()}, @@ -218,8 +218,12 @@ fn get_active_fields( reflect_struct .active_fields() .map(|field| { - let member = ident_or_index(field.data.ident.as_ref(), field.index); - let accessor = get_field_accessor(field.data, field.index, is_tuple); + let member = ident_or_index(field.data.ident.as_ref(), field.declaration_index); + let accessor = get_field_accessor( + field.data, + field.reflection_index.expect("field should be active"), + is_tuple, + ); let ty = field.data.ty.clone(); let get_field = quote! 
{ diff --git a/crates/bevy_reflect/bevy_reflect_derive/src/impls/enums.rs b/crates/bevy_reflect/bevy_reflect_derive/src/impls/enums.rs index 8eec84fcac678..a733ec2e262bf 100644 --- a/crates/bevy_reflect/bevy_reflect_derive/src/impls/enums.rs +++ b/crates/bevy_reflect/bevy_reflect_derive/src/impls/enums.rs @@ -346,7 +346,11 @@ fn generate_impls(reflect_enum: &ReflectEnum, ref_index: &Ident, ref_name: &Iden // Ignored field continue; } - constructor_argument.push(generate_for_field(reflect_idx, field.index, field)); + constructor_argument.push(generate_for_field( + reflect_idx, + field.declaration_index, + field, + )); reflect_idx += 1; } constructor_argument diff --git a/crates/bevy_reflect/bevy_reflect_derive/src/impls/structs.rs b/crates/bevy_reflect/bevy_reflect_derive/src/impls/structs.rs index 60a5c14cbc369..1bf46968cebdc 100644 --- a/crates/bevy_reflect/bevy_reflect_derive/src/impls/structs.rs +++ b/crates/bevy_reflect/bevy_reflect_derive/src/impls/structs.rs @@ -19,12 +19,12 @@ pub(crate) fn impl_struct(reflect_struct: &ReflectStruct) -> proc_macro2::TokenS .ident .as_ref() .map(|i| i.to_string()) - .unwrap_or_else(|| field.index.to_string()) + .unwrap_or_else(|| field.declaration_index.to_string()) }) .collect::>(); let field_idents = reflect_struct .active_fields() - .map(|field| ident_or_index(field.data.ident.as_ref(), field.index)) + .map(|field| ident_or_index(field.data.ident.as_ref(), field.declaration_index)) .collect::>(); let field_types = reflect_struct.active_types(); let field_count = field_idents.len(); diff --git a/crates/bevy_reflect/bevy_reflect_derive/src/impls/tuple_structs.rs b/crates/bevy_reflect/bevy_reflect_derive/src/impls/tuple_structs.rs index ed507f3714d10..e05226d7a52b6 100644 --- a/crates/bevy_reflect/bevy_reflect_derive/src/impls/tuple_structs.rs +++ b/crates/bevy_reflect/bevy_reflect_derive/src/impls/tuple_structs.rs @@ -14,7 +14,7 @@ pub(crate) fn impl_tuple_struct(reflect_struct: &ReflectStruct) -> proc_macro2:: let field_idents = reflect_struct .active_fields() - .map(|field| Member::Unnamed(Index::from(field.index))) + .map(|field| Member::Unnamed(Index::from(field.declaration_index))) .collect::>(); let field_types = reflect_struct.active_types(); let field_count = field_idents.len(); diff --git a/crates/bevy_reflect/bevy_reflect_derive/src/lib.rs b/crates/bevy_reflect/bevy_reflect_derive/src/lib.rs index 5474f143cda99..e87d3ccf5c8d3 100644 --- a/crates/bevy_reflect/bevy_reflect_derive/src/lib.rs +++ b/crates/bevy_reflect/bevy_reflect_derive/src/lib.rs @@ -24,6 +24,7 @@ mod from_reflect; mod impls; mod reflect_value; mod registration; +mod serialization; mod trait_reflection; mod type_path; mod type_uuid; @@ -201,8 +202,10 @@ pub fn derive_reflect(input: TokenStream) -> TokenStream { }; TokenStream::from(quote! { - #reflect_impls - #from_reflect_impl + const _: () = { + #reflect_impls + #from_reflect_impl + }; }) } @@ -241,15 +244,20 @@ pub fn derive_from_reflect(input: TokenStream) -> TokenStream { Err(err) => return err.into_compile_error().into(), }; - match derive_data { + let from_reflect_impl = match derive_data { ReflectDerive::Struct(struct_data) | ReflectDerive::UnitStruct(struct_data) => { from_reflect::impl_struct(&struct_data) } ReflectDerive::TupleStruct(struct_data) => from_reflect::impl_tuple_struct(&struct_data), ReflectDerive::Enum(meta) => from_reflect::impl_enum(&meta), ReflectDerive::Value(meta) => from_reflect::impl_value(&meta), - } - .into() + }; + + TokenStream::from(quote! 
{ + const _: () = { + #from_reflect_impl + }; + }) } /// Derives the `TypePath` trait, providing a stable alternative to [`std::any::type_name`]. @@ -275,21 +283,31 @@ pub fn derive_type_path(input: TokenStream) -> TokenStream { Err(err) => return err.into_compile_error().into(), }; - impls::impl_type_path( + let type_path_impl = impls::impl_type_path( derive_data.meta(), // Use `WhereClauseOptions::new_value` here so we don't enforce reflection bounds &WhereClauseOptions::new_value(derive_data.meta()), - ) - .into() + ); + + TokenStream::from(quote! { + const _: () = { + #type_path_impl + }; + }) } // From https://github.com/randomPoison/type-uuid #[proc_macro_derive(TypeUuid, attributes(uuid))] pub fn derive_type_uuid(input: TokenStream) -> TokenStream { let input = parse_macro_input!(input as DeriveInput); - type_uuid::type_uuid_derive(input) - .unwrap_or_else(syn::Error::into_compile_error) - .into() + let uuid_impl = + type_uuid::type_uuid_derive(input).unwrap_or_else(syn::Error::into_compile_error); + + TokenStream::from(quote! { + const _: () = { + #uuid_impl + }; + }) } /// A macro that automatically generates type data for traits, which their implementors can then register. @@ -401,8 +419,10 @@ pub fn impl_reflect_value(input: TokenStream) -> TokenStream { let from_reflect_impl = from_reflect::impl_value(&meta); TokenStream::from(quote! { - #reflect_impls - #from_reflect_impl + const _: () = { + #reflect_impls + #from_reflect_impl + }; }) } @@ -446,7 +466,7 @@ pub fn impl_reflect_struct(input: TokenStream) -> TokenStream { Err(err) => return err.into_compile_error().into(), }; - match derive_data { + let output = match derive_data { ReflectDerive::Struct(struct_data) => { if !struct_data.meta().type_path().has_custom_path() { return syn::Error::new( @@ -460,27 +480,30 @@ pub fn impl_reflect_struct(input: TokenStream) -> TokenStream { let impl_struct = impls::impl_struct(&struct_data); let impl_from_struct = from_reflect::impl_struct(&struct_data); - TokenStream::from(quote! { + quote! { #impl_struct #impl_from_struct - }) + } } ReflectDerive::TupleStruct(..) => syn::Error::new( ast.span(), "impl_reflect_struct does not support tuple structs", ) - .into_compile_error() - .into(), + .into_compile_error(), ReflectDerive::UnitStruct(..) => syn::Error::new( ast.span(), "impl_reflect_struct does not support unit structs", ) - .into_compile_error() - .into(), + .into_compile_error(), _ => syn::Error::new(ast.span(), "impl_reflect_struct only supports structs") - .into_compile_error() - .into(), - } + .into_compile_error(), + }; + + TokenStream::from(quote! { + const _: () = { + #output + }; + }) } /// A macro used to generate a `FromReflect` trait implementation for the given type. @@ -521,7 +544,14 @@ pub fn impl_from_reflect_value(input: TokenStream) -> TokenStream { } }; - from_reflect::impl_value(&ReflectMeta::new(type_path, def.traits.unwrap_or_default())).into() + let from_reflect_impl = + from_reflect::impl_value(&ReflectMeta::new(type_path, def.traits.unwrap_or_default())); + + TokenStream::from(quote! { + const _: () = { + #from_reflect_impl + }; + }) } /// A replacement for [deriving `TypePath`] for use on foreign types. @@ -583,12 +613,24 @@ pub fn impl_type_path(input: TokenStream) -> TokenStream { let meta = ReflectMeta::new(type_path, ReflectTraits::default()); - impls::impl_type_path(&meta, &WhereClauseOptions::new_value(&meta)).into() + let type_path_impl = impls::impl_type_path(&meta, &WhereClauseOptions::new_value(&meta)); + + TokenStream::from(quote! 
{ + const _: () = { + #type_path_impl + }; + }) } /// Derives `TypeUuid` for the given type. This is used internally to implement `TypeUuid` on foreign types, such as those in the std. This macro should be used in the format of `<[Generic Params]> [Type (Path)], [Uuid (String Literal)]`. #[proc_macro] pub fn impl_type_uuid(input: TokenStream) -> TokenStream { let def = parse_macro_input!(input as type_uuid::TypeUuidDef); - type_uuid::gen_impl_type_uuid(def).into() + let uuid_impl = type_uuid::gen_impl_type_uuid(def); + + TokenStream::from(quote! { + const _: () = { + #uuid_impl + }; + }) } diff --git a/crates/bevy_reflect/bevy_reflect_derive/src/registration.rs b/crates/bevy_reflect/bevy_reflect_derive/src/registration.rs index 0b0a31e0a38fd..115274ad46ae1 100644 --- a/crates/bevy_reflect/bevy_reflect_derive/src/registration.rs +++ b/crates/bevy_reflect/bevy_reflect_derive/src/registration.rs @@ -1,8 +1,8 @@ //! Contains code related specifically to Bevy's type registration. use crate::derive_data::ReflectMeta; +use crate::serialization::SerializationDataDef; use crate::utility::{extend_where_clause, WhereClauseOptions}; -use bit_set::BitSet; use quote::quote; /// Creates the `GetTypeRegistration` impl for the given type data. @@ -10,7 +10,7 @@ use quote::quote; pub(crate) fn impl_get_type_registration( meta: &ReflectMeta, where_clause_options: &WhereClauseOptions, - serialization_denylist: Option<&BitSet>, + serialization_data: Option<&SerializationDataDef>, ) -> proc_macro2::TokenStream { let type_path = meta.type_path(); let bevy_reflect_path = meta.bevy_reflect_path(); @@ -20,17 +20,16 @@ pub(crate) fn impl_get_type_registration( let from_reflect_data = if meta.from_reflect().should_auto_derive() { Some(quote! { - registration.insert::<#bevy_reflect_path::ReflectFromReflect>(#bevy_reflect_path::FromType::::from_type()); + registration.insert::<#bevy_reflect_path::ReflectFromReflect>(#bevy_reflect_path::FromType::::from_type()); }) } else { None }; - let serialization_data = serialization_denylist.map(|denylist| { - let denylist = denylist.into_iter(); + let serialization_data = serialization_data.map(|data| { + let serialization_data = data.as_serialization_data(bevy_reflect_path); quote! { - let ignored_indices = ::core::iter::IntoIterator::into_iter([#(#denylist),*]); - registration.insert::<#bevy_reflect_path::serde::SerializationData>(#bevy_reflect_path::serde::SerializationData::new(ignored_indices)); + registration.insert::<#bevy_reflect_path::serde::SerializationData>(#serialization_data); } }); diff --git a/crates/bevy_reflect/bevy_reflect_derive/src/serialization.rs b/crates/bevy_reflect/bevy_reflect_derive/src/serialization.rs new file mode 100644 index 0000000000000..0242947b5c91f --- /dev/null +++ b/crates/bevy_reflect/bevy_reflect_derive/src/serialization.rs @@ -0,0 +1,91 @@ +use crate::derive_data::StructField; +use crate::field_attributes::{DefaultBehavior, ReflectIgnoreBehavior}; +use bevy_macro_utils::fq_std::{FQBox, FQDefault}; +use quote::quote; +use std::collections::HashMap; +use syn::spanned::Spanned; +use syn::Path; + +type ReflectionIndex = usize; + +/// Collected serialization data used to generate a `SerializationData` type. +pub(crate) struct SerializationDataDef { + /// Maps a field's _reflection_ index to its [`SkippedFieldDef`] if marked as `#[reflect(skip_serializing)]`. + skipped: HashMap, +} + +impl SerializationDataDef { + /// Attempts to create a new `SerializationDataDef` from the given collection of fields. 
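A short aside on the `const _: () = { ... }` wrapping that the `lib.rs` changes above apply to every piece of generated output: this is a common hygiene pattern in derive macros. Anything the macro `use`s or defines inside the anonymous `const` stays scoped to that block, while `impl`s written there still apply globally. A hand-written sketch of the effect, with illustrative names (ordinary code, not macro output):

```rust
struct MyType;

// Items inside the anonymous const are scoped to this block, but the `impl`
// still applies to `MyType` everywhere, which is exactly what derive output wants.
const _: () = {
    use std::fmt::Write; // visible only inside this block

    impl MyType {
        fn generated_helper(&self) -> String {
            let mut out = String::new();
            let _ = write!(out, "generated for MyType");
            out
        }
    }
};

fn main() {
    // The method is usable here even though it was defined inside `const _`,
    // while `std::fmt::Write` was never brought into this module's scope.
    println!("{}", MyType.generated_helper());
}
```

The same motivation presumably applies to the wrapped `impl_type_uuid`, `impl_reflect_value`, and `impl_from_reflect_value` outputs shown above.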
+ /// + /// Returns `Ok(Some(data))` if there are any fields needing to be skipped during serialization. + /// Otherwise, returns `Ok(None)`. + pub fn new(fields: &[StructField<'_>]) -> Result, syn::Error> { + let mut skipped = HashMap::default(); + + for field in fields { + match field.attrs.ignore { + ReflectIgnoreBehavior::IgnoreSerialization => { + skipped.insert( + field.reflection_index.ok_or_else(|| { + syn::Error::new( + field.data.span(), + "internal error: field is missing a reflection index", + ) + })?, + SkippedFieldDef::new(field)?, + ); + } + _ => continue, + } + } + + if skipped.is_empty() { + Ok(None) + } else { + Ok(Some(Self { skipped })) + } + } + + /// Returns a `TokenStream` containing an initialized `SerializationData` type. + pub fn as_serialization_data(&self, bevy_reflect_path: &Path) -> proc_macro2::TokenStream { + let fields = + self.skipped + .iter() + .map(|(reflection_index, SkippedFieldDef { default_fn })| { + quote! {( + #reflection_index, + #bevy_reflect_path::serde::SkippedField::new(#default_fn) + )} + }); + quote! { + #bevy_reflect_path::serde::SerializationData::new( + ::core::iter::IntoIterator::into_iter([#(#fields),*]) + ) + } + } +} + +/// Collected field data used to generate a `SkippedField` type. +pub(crate) struct SkippedFieldDef { + /// The default function for this field. + /// + /// This is of type `fn() -> Box`. + default_fn: proc_macro2::TokenStream, +} + +impl SkippedFieldDef { + pub fn new(field: &StructField<'_>) -> Result { + let ty = &field.data.ty; + + let default_fn = match &field.attrs.default { + DefaultBehavior::Func(func) => quote! { + || { #FQBox::new(#func()) } + }, + _ => quote! { + || { #FQBox::new(<#ty as #FQDefault>::default()) } + }, + }; + + Ok(Self { default_fn }) + } +} diff --git a/crates/bevy_reflect/bevy_reflect_derive/src/utility.rs b/crates/bevy_reflect/bevy_reflect_derive/src/utility.rs index 9d25e35a37533..0cd4c88b4cae9 100644 --- a/crates/bevy_reflect/bevy_reflect_derive/src/utility.rs +++ b/crates/bevy_reflect/bevy_reflect_derive/src/utility.rs @@ -1,12 +1,10 @@ //! General-purpose utility functions for internal usage within this crate. use crate::derive_data::{ReflectMeta, StructField}; -use crate::field_attributes::ReflectIgnoreBehavior; use bevy_macro_utils::{ fq_std::{FQAny, FQOption, FQSend, FQSync}, BevyManifest, }; -use bit_set::BitSet; use proc_macro2::{Ident, Span}; use quote::{quote, ToTokens}; use syn::{spanned::Spanned, LitStr, Member, Path, Type, WhereClause}; @@ -286,42 +284,6 @@ impl ResultSifter { } } -/// Converts an iterator over ignore behavior of members to a bitset of ignored members. -/// -/// Takes into account the fact that always ignored (non-reflected) members are skipped. -/// -/// # Example -/// ```rust,ignore -/// pub struct HelloWorld { -/// reflected_field: u32 // index: 0 -/// -/// #[reflect(ignore)] -/// non_reflected_field: u32 // index: N/A (not 1!) 
-/// -/// #[reflect(skip_serializing)] -/// non_serialized_field: u32 // index: 1 -/// } -/// ``` -/// Would convert to the `0b01` bitset (i.e second field is NOT serialized) -/// -pub(crate) fn members_to_serialization_denylist(member_iter: T) -> BitSet -where - T: Iterator, -{ - let mut bitset = BitSet::default(); - - member_iter.fold(0, |next_idx, member| match member { - ReflectIgnoreBehavior::IgnoreAlways => next_idx, - ReflectIgnoreBehavior::IgnoreSerialization => { - bitset.insert(next_idx); - next_idx + 1 - } - ReflectIgnoreBehavior::None => next_idx + 1, - }); - - bitset -} - /// Turns an `Option` into a `TokenStream` for an `Option`. pub(crate) fn wrap_in_option(tokens: Option) -> proc_macro2::TokenStream { match tokens { diff --git a/crates/bevy_reflect/src/lib.rs b/crates/bevy_reflect/src/lib.rs index b01adece410bb..1a02cf4ed838d 100644 --- a/crates/bevy_reflect/src/lib.rs +++ b/crates/bevy_reflect/src/lib.rs @@ -764,6 +764,39 @@ mod tests { .unwrap_or_default()); } + #[test] + fn from_reflect_should_allow_ignored_unnamed_fields() { + #[derive(Reflect, Eq, PartialEq, Debug)] + struct MyTupleStruct(i8, #[reflect(ignore)] i16, i32); + + let expected = MyTupleStruct(1, 0, 3); + + let mut dyn_tuple_struct = DynamicTupleStruct::default(); + dyn_tuple_struct.insert(1_i8); + dyn_tuple_struct.insert(3_i32); + let my_tuple_struct = ::from_reflect(&dyn_tuple_struct); + + assert_eq!(Some(expected), my_tuple_struct); + + #[derive(Reflect, Eq, PartialEq, Debug)] + enum MyEnum { + Tuple(i8, #[reflect(ignore)] i16, i32), + } + + let expected = MyEnum::Tuple(1, 0, 3); + + let mut dyn_tuple = DynamicTuple::default(); + dyn_tuple.insert(1_i8); + dyn_tuple.insert(3_i32); + + let mut dyn_enum = DynamicEnum::default(); + dyn_enum.set_variant("Tuple", dyn_tuple); + + let my_enum = ::from_reflect(&dyn_enum); + + assert_eq!(Some(expected), my_enum); + } + #[test] fn from_reflect_should_use_default_field_attributes() { #[derive(Reflect, Eq, PartialEq, Debug)] diff --git a/crates/bevy_reflect/src/serde/de.rs b/crates/bevy_reflect/src/serde/de.rs index 38f1795186d9a..170c6c941cf1f 100644 --- a/crates/bevy_reflect/src/serde/de.rs +++ b/crates/bevy_reflect/src/serde/de.rs @@ -2,9 +2,8 @@ use crate::serde::SerializationData; use crate::{ ArrayInfo, DynamicArray, DynamicEnum, DynamicList, DynamicMap, DynamicStruct, DynamicTuple, DynamicTupleStruct, DynamicVariant, EnumInfo, ListInfo, Map, MapInfo, NamedField, Reflect, - ReflectDeserialize, StructInfo, StructVariantInfo, Tuple, TupleInfo, TupleStruct, - TupleStructInfo, TupleVariantInfo, TypeInfo, TypeRegistration, TypeRegistry, UnnamedField, - VariantInfo, + ReflectDeserialize, StructInfo, StructVariantInfo, TupleInfo, TupleStructInfo, + TupleVariantInfo, TypeInfo, TypeRegistration, TypeRegistry, UnnamedField, VariantInfo, }; use erased_serde::Deserializer; use serde::de::{ @@ -27,6 +26,8 @@ pub trait DeserializeValue { trait StructLikeInfo { fn get_path(&self) -> &str; fn get_field(&self, name: &str) -> Option<&NamedField>; + fn field_at(&self, index: usize) -> Option<&NamedField>; + fn get_field_len(&self) -> usize; fn iter_fields(&self) -> Iter<'_, NamedField>; } @@ -49,10 +50,18 @@ impl StructLikeInfo for StructInfo { self.type_path() } + fn field_at(&self, index: usize) -> Option<&NamedField> { + self.field_at(index) + } + fn get_field(&self, name: &str) -> Option<&NamedField> { self.field(name) } + fn get_field_len(&self) -> usize { + self.field_len() + } + fn iter_fields(&self) -> Iter<'_, NamedField> { self.iter() } @@ -80,10 +89,18 @@ impl 
StructLikeInfo for StructVariantInfo { self.name() } + fn field_at(&self, index: usize) -> Option<&NamedField> { + self.field_at(index) + } + fn get_field(&self, name: &str) -> Option<&NamedField> { self.field(name) } + fn get_field_len(&self) -> usize { + self.field_len() + } + fn iter_fields(&self) -> Iter<'_, NamedField> { self.iter() } @@ -120,6 +137,54 @@ impl TupleLikeInfo for TupleInfo { } } +impl Container for TupleInfo { + fn get_field_registration<'a, E: Error>( + &self, + index: usize, + registry: &'a TypeRegistry, + ) -> Result<&'a TypeRegistration, E> { + let field = self.field_at(index).ok_or_else(|| { + de::Error::custom(format_args!( + "no field at index {} on tuple {}", + index, + self.type_path(), + )) + })?; + get_registration(field.type_id(), field.type_path(), registry) + } +} + +impl TupleLikeInfo for TupleStructInfo { + fn get_path(&self) -> &str { + self.type_path() + } + + fn get_field(&self, index: usize) -> Option<&UnnamedField> { + self.field_at(index) + } + + fn get_field_len(&self) -> usize { + self.field_len() + } +} + +impl Container for TupleStructInfo { + fn get_field_registration<'a, E: Error>( + &self, + index: usize, + registry: &'a TypeRegistry, + ) -> Result<&'a TypeRegistration, E> { + let field = self.field_at(index).ok_or_else(|| { + de::Error::custom(format_args!( + "no field at index {} on tuple struct {}", + index, + self.type_path(), + )) + })?; + get_registration(field.type_id(), field.type_path(), registry) + } +} + impl TupleLikeInfo for TupleVariantInfo { fn get_path(&self) -> &str { self.name() @@ -134,6 +199,23 @@ impl TupleLikeInfo for TupleVariantInfo { } } +impl Container for TupleVariantInfo { + fn get_field_registration<'a, E: Error>( + &self, + index: usize, + registry: &'a TypeRegistry, + ) -> Result<&'a TypeRegistration, E> { + let field = self.field_at(index).ok_or_else(|| { + de::Error::custom(format_args!( + "no field at index {} on tuple variant {}", + index, + self.name(), + )) + })?; + get_registration(field.type_id(), field.type_path(), registry) + } +} + /// A debug struct used for error messages that displays a list of expected values. /// /// # Example @@ -444,6 +526,7 @@ impl<'a, 'de> DeserializeSeed<'de> for TypedReflectDeserializer<'a> { tuple_info.field_len(), TupleVisitor { tuple_info, + registration: self.registration, registry: self.registry, }, )?; @@ -500,43 +583,14 @@ impl<'a, 'de> Visitor<'de> for StructVisitor<'a> { where V: MapAccess<'de>, { - visit_struct(&mut map, self.struct_info, self.registry) + visit_struct(&mut map, self.struct_info, self.registration, self.registry) } fn visit_seq(self, mut seq: A) -> Result where A: SeqAccess<'de>, { - let mut index = 0usize; - let mut output = DynamicStruct::default(); - - let ignored_len = self - .registration - .data::() - .map(|data| data.len()) - .unwrap_or(0); - let field_len = self.struct_info.field_len().saturating_sub(ignored_len); - - if field_len == 0 { - // Handle unit structs and ignored fields - return Ok(output); - } - - while let Some(value) = seq.next_element_seed(TypedReflectDeserializer { - registration: self - .struct_info - .get_field_registration(index, self.registry)?, - registry: self.registry, - })? 
{ - let name = self.struct_info.field_at(index).unwrap().name(); - output.insert_boxed(name, value); - index += 1; - if index >= self.struct_info.field_len() { - break; - } - } - - Ok(output) + visit_struct_seq(&mut seq, self.struct_info, self.registration, self.registry) } } @@ -557,64 +611,19 @@ impl<'a, 'de> Visitor<'de> for TupleStructVisitor<'a> { where V: SeqAccess<'de>, { - let mut index = 0usize; - let mut tuple_struct = DynamicTupleStruct::default(); - - let ignored_len = self - .registration - .data::() - .map(|data| data.len()) - .unwrap_or(0); - let field_len = self - .tuple_struct_info - .field_len() - .saturating_sub(ignored_len); - - if field_len == 0 { - // Handle unit structs and ignored fields - return Ok(tuple_struct); - } - - let get_field_registration = |index: usize| -> Result<&'a TypeRegistration, V::Error> { - let field = self.tuple_struct_info.field_at(index).ok_or_else(|| { - de::Error::custom(format_args!( - "no field at index {} on tuple {}", - index, - self.tuple_struct_info.type_path(), - )) - })?; - get_registration(field.type_id(), field.type_path(), self.registry) - }; - - while let Some(value) = seq.next_element_seed(TypedReflectDeserializer { - registration: get_field_registration(index)?, - registry: self.registry, - })? { - tuple_struct.insert_boxed(value); - index += 1; - if index >= self.tuple_struct_info.field_len() { - break; - } - } - - let ignored_len = self - .registration - .data::() - .map(|data| data.len()) - .unwrap_or(0); - if tuple_struct.field_len() != self.tuple_struct_info.field_len() - ignored_len { - return Err(Error::invalid_length( - tuple_struct.field_len(), - &self.tuple_struct_info.field_len().to_string().as_str(), - )); - } - - Ok(tuple_struct) + visit_tuple( + &mut seq, + self.tuple_struct_info, + self.registration, + self.registry, + ) + .map(DynamicTupleStruct::from) } } struct TupleVisitor<'a> { tuple_info: &'static TupleInfo, + registration: &'a TypeRegistration, registry: &'a TypeRegistry, } @@ -629,7 +638,7 @@ impl<'a, 'de> Visitor<'de> for TupleVisitor<'a> { where V: SeqAccess<'de>, { - visit_tuple(&mut seq, self.tuple_info, self.registry) + visit_tuple(&mut seq, self.tuple_info, self.registration, self.registry) } } @@ -782,9 +791,7 @@ impl<'a, 'de> Visitor<'de> for EnumVisitor<'a> { )? .into(), VariantInfo::Tuple(tuple_info) if tuple_info.field_len() == 1 => { - let field = tuple_info.field_at(0).unwrap(); - let registration = - get_registration(field.type_id(), field.type_path(), self.registry)?; + let registration = tuple_info.get_field_registration(0, self.registry)?; let value = variant.newtype_variant_seed(TypedReflectDeserializer { registration, registry: self.registry, @@ -879,43 +886,14 @@ impl<'a, 'de> Visitor<'de> for StructVariantVisitor<'a> { where V: MapAccess<'de>, { - visit_struct(&mut map, self.struct_info, self.registry) + visit_struct(&mut map, self.struct_info, self.registration, self.registry) } fn visit_seq(self, mut seq: A) -> Result where A: SeqAccess<'de>, { - let mut index = 0usize; - let mut output = DynamicStruct::default(); - - let ignored_len = self - .registration - .data::() - .map(|data| data.len()) - .unwrap_or(0); - let field_len = self.struct_info.field_len().saturating_sub(ignored_len); - - if field_len == 0 { - // Handle all fields being ignored - return Ok(output); - } - - while let Some(value) = seq.next_element_seed(TypedReflectDeserializer { - registration: self - .struct_info - .get_field_registration(index, self.registry)?, - registry: self.registry, - })? 
{ - let name = self.struct_info.field_at(index).unwrap().name(); - output.insert_boxed(name, value); - index += 1; - if index >= self.struct_info.field_len() { - break; - } - } - - Ok(output) + visit_struct_seq(&mut seq, self.struct_info, self.registration, self.registry) } } @@ -936,19 +914,7 @@ impl<'a, 'de> Visitor<'de> for TupleVariantVisitor<'a> { where V: SeqAccess<'de>, { - let ignored_len = self - .registration - .data::() - .map(|data| data.len()) - .unwrap_or(0); - let field_len = self.tuple_info.field_len().saturating_sub(ignored_len); - - if field_len == 0 { - // Handle all fields being ignored - return Ok(DynamicTuple::default()); - } - - visit_tuple(&mut seq, self.tuple_info, self.registry) + visit_tuple(&mut seq, self.tuple_info, self.registration, self.registry) } } @@ -1005,6 +971,7 @@ impl<'a, 'de> Visitor<'de> for OptionVisitor<'a> { fn visit_struct<'de, T, V>( map: &mut V, info: &'static T, + registration: &TypeRegistration, registry: &TypeRegistry, ) -> Result where @@ -1029,49 +996,101 @@ where dynamic_struct.insert_boxed(&key, value); } + if let Some(serialization_data) = registration.data::() { + for (skipped_index, skipped_field) in serialization_data.iter_skipped() { + let Some(field) = info.field_at(*skipped_index) else { + continue; + }; + dynamic_struct.insert_boxed(field.name(), skipped_field.generate_default()); + } + } + Ok(dynamic_struct) } fn visit_tuple<'de, T, V>( seq: &mut V, info: &T, + registration: &TypeRegistration, registry: &TypeRegistry, ) -> Result where - T: TupleLikeInfo, + T: TupleLikeInfo + Container, V: SeqAccess<'de>, { let mut tuple = DynamicTuple::default(); - let mut index = 0usize; - let get_field_registration = |index: usize| -> Result<&TypeRegistration, V::Error> { - let field = info.get_field(index).ok_or_else(|| { - Error::invalid_length(index, &info.get_field_len().to_string().as_str()) - })?; - get_registration(field.type_id(), field.type_path(), registry) - }; + let len = info.get_field_len(); - while let Some(value) = seq.next_element_seed(TypedReflectDeserializer { - registration: get_field_registration(index)?, - registry, - })? { - tuple.insert_boxed(value); - index += 1; - if index >= info.get_field_len() { - break; + if len == 0 { + // Handle empty tuple/tuple struct + return Ok(tuple); + } + + let serialization_data = registration.data::(); + + for index in 0..len { + if let Some(value) = serialization_data.and_then(|data| data.generate_default(index)) { + tuple.insert_boxed(value); + continue; } + + let value = seq + .next_element_seed(TypedReflectDeserializer { + registration: info.get_field_registration(index, registry)?, + registry, + })? 
+ .ok_or_else(|| Error::invalid_length(index, &len.to_string().as_str()))?; + tuple.insert_boxed(value); } + Ok(tuple) +} + +fn visit_struct_seq<'de, T, V>( + seq: &mut V, + info: &T, + registration: &TypeRegistration, + registry: &TypeRegistry, +) -> Result +where + T: StructLikeInfo + Container, + V: SeqAccess<'de>, +{ + let mut dynamic_struct = DynamicStruct::default(); + let len = info.get_field_len(); - if tuple.field_len() != len { - return Err(Error::invalid_length( - tuple.field_len(), - &len.to_string().as_str(), - )); + if len == 0 { + // Handle unit structs + return Ok(dynamic_struct); } - Ok(tuple) + let serialization_data = registration.data::(); + + for index in 0..len { + let name = info.field_at(index).unwrap().name(); + + if serialization_data + .map(|data| data.is_field_skipped(index)) + .unwrap_or_default() + { + if let Some(value) = serialization_data.unwrap().generate_default(index) { + dynamic_struct.insert_boxed(name, value); + } + continue; + } + + let value = seq + .next_element_seed(TypedReflectDeserializer { + registration: info.get_field_registration(index, registry)?, + registry, + })? + .ok_or_else(|| Error::invalid_length(index, &len.to_string().as_str()))?; + dynamic_struct.insert_boxed(name, value); + } + + Ok(dynamic_struct) } fn get_registration<'a, E: Error>( diff --git a/crates/bevy_reflect/src/serde/mod.rs b/crates/bevy_reflect/src/serde/mod.rs index b675b5dcb2395..c444279fa928a 100644 --- a/crates/bevy_reflect/src/serde/mod.rs +++ b/crates/bevy_reflect/src/serde/mod.rs @@ -8,11 +8,11 @@ pub use type_data::*; #[cfg(test)] mod tests { - use crate::{self as bevy_reflect, DynamicTupleStruct}; + use crate::{self as bevy_reflect, DynamicTupleStruct, Struct}; use crate::{ serde::{ReflectSerializer, UntypedReflectDeserializer}, type_registry::TypeRegistry, - DynamicStruct, Reflect, + DynamicStruct, FromReflect, Reflect, }; use serde::de::DeserializeSeed; @@ -26,7 +26,14 @@ mod tests { b: i32, #[reflect(skip_serializing)] c: i32, + #[reflect(skip_serializing)] + #[reflect(default = "custom_default")] d: i32, + e: i32, + } + + fn custom_default() -> i32 { + -1 } let mut registry = TypeRegistry::default(); @@ -37,24 +44,42 @@ mod tests { b: 4, c: 5, d: 6, + e: 7, }; let serializer = ReflectSerializer::new(&test_struct, ®istry); let serialized = ron::ser::to_string_pretty(&serializer, ron::ser::PrettyConfig::default()).unwrap(); - let mut expected = DynamicStruct::default(); - expected.insert("a", 3); - expected.insert("d", 6); - let mut deserializer = ron::de::Deserializer::from_str(&serialized).unwrap(); let reflect_deserializer = UntypedReflectDeserializer::new(®istry); let value = reflect_deserializer.deserialize(&mut deserializer).unwrap(); let deserialized = value.take::().unwrap(); + let mut expected = DynamicStruct::default(); + expected.insert("a", 3); + // Ignored: expected.insert("b", 0); + expected.insert("c", 0); + expected.insert("d", -1); + expected.insert("e", 7); + assert!( expected.reflect_partial_eq(&deserialized).unwrap(), - "Expected {expected:?} found {deserialized:?}" + "Deserialization failed: expected {expected:?} found {deserialized:?}" + ); + + let expected = TestStruct { + a: 3, + b: 0, + c: 0, + d: -1, + e: 7, + }; + let received = ::from_reflect(&deserialized).unwrap(); + + assert_eq!( + expected, received, + "FromReflect failed: expected {expected:?} found {received:?}" ); } @@ -66,36 +91,56 @@ mod tests { i32, #[reflect(ignore)] i32, #[reflect(skip_serializing)] i32, + #[reflect(skip_serializing)] + #[reflect(default = 
"custom_default")] + i32, i32, ); + fn custom_default() -> i32 { + -1 + } + let mut registry = TypeRegistry::default(); registry.register::(); - let test_struct = TestStruct(3, 4, 5, 6); + let test_struct = TestStruct(3, 4, 5, 6, 7); let serializer = ReflectSerializer::new(&test_struct, ®istry); let serialized = ron::ser::to_string_pretty(&serializer, ron::ser::PrettyConfig::default()).unwrap(); - let mut expected = DynamicTupleStruct::default(); - expected.insert(3); - expected.insert(6); - let mut deserializer = ron::de::Deserializer::from_str(&serialized).unwrap(); let reflect_deserializer = UntypedReflectDeserializer::new(®istry); let value = reflect_deserializer.deserialize(&mut deserializer).unwrap(); let deserialized = value.take::().unwrap(); + let mut expected = DynamicTupleStruct::default(); + expected.insert(3); + // Ignored: expected.insert(0); + expected.insert(0); + expected.insert(-1); + expected.insert(7); + assert!( expected.reflect_partial_eq(&deserialized).unwrap(), - "Expected {expected:?} found {deserialized:?}" + "Deserialization failed: expected {expected:?} found {deserialized:?}" + ); + + let expected = TestStruct(3, 0, 0, -1, 7); + let received = ::from_reflect(&deserialized).unwrap(); + + assert_eq!( + expected, received, + "FromReflect failed: expected {expected:?} found {received:?}" ); } #[test] - #[should_panic(expected = "cannot get type info for bevy_reflect::DynamicStruct")] - fn unproxied_dynamic_should_not_serialize() { + #[should_panic( + expected = "cannot serialize dynamic value without represented type: bevy_reflect::DynamicStruct" + )] + fn should_not_serialize_unproxied_dynamic() { let registry = TypeRegistry::default(); let mut value = DynamicStruct::default(); @@ -104,4 +149,36 @@ mod tests { let serializer = ReflectSerializer::new(&value, ®istry); ron::ser::to_string(&serializer).unwrap(); } + + #[test] + fn should_roundtrip_proxied_dynamic() { + #[derive(Reflect)] + struct TestStruct { + a: i32, + b: i32, + } + + let mut registry = TypeRegistry::default(); + registry.register::(); + + let value: DynamicStruct = TestStruct { a: 123, b: 456 }.clone_dynamic(); + + let serializer = ReflectSerializer::new(&value, ®istry); + + let expected = r#"{"bevy_reflect::serde::tests::TestStruct":(a:123,b:456)}"#; + let result = ron::ser::to_string(&serializer).unwrap(); + assert_eq!(expected, result); + + let mut deserializer = ron::de::Deserializer::from_str(&result).unwrap(); + let reflect_deserializer = UntypedReflectDeserializer::new(®istry); + + let expected = value.clone_value(); + let result = reflect_deserializer + .deserialize(&mut deserializer) + .unwrap() + .take::() + .unwrap(); + + assert!(expected.reflect_partial_eq(&result).unwrap()); + } } diff --git a/crates/bevy_reflect/src/serde/ser.rs b/crates/bevy_reflect/src/serde/ser.rs index da0b464893dcf..fc072a8d2de18 100644 --- a/crates/bevy_reflect/src/serde/ser.rs +++ b/crates/bevy_reflect/src/serde/ser.rs @@ -68,7 +68,22 @@ impl<'a> Serialize for ReflectSerializer<'a> { { let mut state = serializer.serialize_map(Some(1))?; state.serialize_entry( - self.value.reflect_type_path(), + self.value + .get_represented_type_info() + .ok_or_else(|| { + if self.value.is_dynamic() { + Error::custom(format_args!( + "cannot serialize dynamic value without represented type: {}", + self.value.reflect_type_path() + )) + } else { + Error::custom(format_args!( + "cannot get type info for {}", + self.value.reflect_type_path() + )) + } + })? 
+ .type_path(), &TypedReflectSerializer::new(self.value, self.registry), )?; state.end() @@ -197,7 +212,7 @@ impl<'a> Serialize for StructSerializer<'a> { for (index, value) in self.struct_value.iter_fields().enumerate() { if serialization_data - .map(|data| data.is_ignored_field(index)) + .map(|data| data.is_field_skipped(index)) .unwrap_or(false) { continue; @@ -250,7 +265,7 @@ impl<'a> Serialize for TupleStructSerializer<'a> { for (index, value) in self.tuple_struct.iter_fields().enumerate() { if serialization_data - .map(|data| data.is_ignored_field(index)) + .map(|data| data.is_field_skipped(index)) .unwrap_or(false) { continue; diff --git a/crates/bevy_reflect/src/serde/type_data.rs b/crates/bevy_reflect/src/serde/type_data.rs index ee69a390d09cb..d82f3b4579095 100644 --- a/crates/bevy_reflect/src/serde/type_data.rs +++ b/crates/bevy_reflect/src/serde/type_data.rs @@ -1,44 +1,136 @@ -use std::collections::HashSet; +use crate::Reflect; +use bevy_utils::hashbrown::hash_map::Iter; +use bevy_utils::HashMap; -/// Contains data relevant to the automatic reflect powered serialization of a type +/// Contains data relevant to the automatic reflect powered (de)serialization of a type. #[derive(Debug, Clone)] pub struct SerializationData { - ignored_field_indices: HashSet, + skipped_fields: HashMap, } impl SerializationData { - /// Creates a new `SerializationData` instance given: + /// Creates a new `SerializationData` instance with the given skipped fields. /// - /// - `ignored_iter`: the iterator of member indices to be ignored during serialization. Indices are assigned only to reflected members, those which are not reflected are skipped. - pub fn new>(ignored_iter: I) -> Self { + /// # Arguments + /// + /// * `skipped_iter`: The iterator of field indices to be skipped during (de)serialization. + /// Indices are assigned only to reflected fields. + /// Ignored fields (i.e. those marked `#[reflect(ignore)]`) are implicitly skipped + /// and do not need to be included in this iterator. + pub fn new>(skipped_iter: I) -> Self { Self { - ignored_field_indices: ignored_iter.collect(), + skipped_fields: skipped_iter.collect(), } } - /// Returns true if the given index corresponds to a field meant to be ignored in serialization. - /// - /// Indices start from 0 and ignored fields are skipped. + /// Returns true if the given index corresponds to a field meant to be skipped during (de)serialization. /// /// # Example /// - /// ```rust,ignore + /// ``` + /// # use std::any::TypeId; + /// # use bevy_reflect::{Reflect, Struct, TypeRegistry, serde::SerializationData}; + /// #[derive(Reflect)] + /// struct MyStruct { + /// serialize_me: i32, + /// #[reflect(skip_serializing)] + /// skip_me: i32 + /// } + /// + /// let mut registry = TypeRegistry::new(); + /// registry.register::(); + /// + /// let my_struct = MyStruct { + /// serialize_me: 123, + /// skip_me: 321, + /// }; + /// + /// let serialization_data = registry.get_type_data::(TypeId::of::()).unwrap(); + /// /// for (idx, field) in my_struct.iter_fields().enumerate(){ - /// if serialization_data.is_ignored_field(idx){ - /// // serialize ... - /// } + /// if serialization_data.is_field_skipped(idx) { + /// // Skipped! + /// assert_eq!(1, idx); + /// } else { + /// // Not Skipped! + /// assert_eq!(0, idx); + /// } + /// } + /// ``` + pub fn is_field_skipped(&self, index: usize) -> bool { + self.skipped_fields.contains_key(&index) + } + + /// Generates a default instance of the skipped field at the given index. 
+ /// + /// Returns `None` if the field is not skipped. + /// + /// # Example + /// + /// ``` + /// # use std::any::TypeId; + /// # use bevy_reflect::{Reflect, Struct, TypeRegistry, serde::SerializationData}; + /// #[derive(Reflect)] + /// struct MyStruct { + /// serialize_me: i32, + /// #[reflect(skip_serializing)] + /// #[reflect(default = "skip_me_default")] + /// skip_me: i32 /// } + /// + /// fn skip_me_default() -> i32 { + /// 789 + /// } + /// + /// let mut registry = TypeRegistry::new(); + /// registry.register::(); + /// + /// let serialization_data = registry.get_type_data::(TypeId::of::()).unwrap(); + /// assert_eq!(789, serialization_data.generate_default(1).unwrap().take::().unwrap()); /// ``` - pub fn is_ignored_field(&self, index: usize) -> bool { - self.ignored_field_indices.contains(&index) + pub fn generate_default(&self, index: usize) -> Option> { + self.skipped_fields + .get(&index) + .map(|field| field.generate_default()) } - /// Returns the number of ignored fields. + /// Returns the number of skipped fields. pub fn len(&self) -> usize { - self.ignored_field_indices.len() + self.skipped_fields.len() } - /// Returns true if there are no ignored fields. + /// Returns true if there are no skipped fields. pub fn is_empty(&self) -> bool { - self.ignored_field_indices.is_empty() + self.skipped_fields.is_empty() + } + + /// Returns an iterator over the skipped fields. + /// + /// Each item in the iterator is a tuple containing: + /// 1. The reflected index of the field + /// 2. The (de)serialization metadata of the field + pub fn iter_skipped(&self) -> Iter<'_, usize, SkippedField> { + self.skipped_fields.iter() + } +} + +/// Data needed for (de)serialization of a skipped field. +#[derive(Debug, Clone)] +pub struct SkippedField { + default_fn: fn() -> Box, +} + +impl SkippedField { + /// Create a new `SkippedField`. + /// + /// # Arguments + /// + /// * `default_fn`: A function pointer used to generate a default instance of the field. + pub fn new(default_fn: fn() -> Box) -> Self { + Self { default_fn } + } + + /// Generates a default instance of the field. + pub fn generate_default(&self) -> Box { + (self.default_fn)() } } diff --git a/crates/bevy_reflect/src/tuple_struct.rs b/crates/bevy_reflect/src/tuple_struct.rs index 9d12490871980..ff9c53d5481aa 100644 --- a/crates/bevy_reflect/src/tuple_struct.rs +++ b/crates/bevy_reflect/src/tuple_struct.rs @@ -1,8 +1,8 @@ use bevy_reflect_derive::impl_type_path; use crate::{ - self as bevy_reflect, Reflect, ReflectMut, ReflectOwned, ReflectRef, TypeInfo, TypePath, - TypePathTable, UnnamedField, + self as bevy_reflect, DynamicTuple, Reflect, ReflectMut, ReflectOwned, ReflectRef, Tuple, + TypeInfo, TypePath, TypePathTable, UnnamedField, }; use std::any::{Any, TypeId}; use std::fmt::{Debug, Formatter}; @@ -390,6 +390,15 @@ impl Debug for DynamicTupleStruct { } } +impl From for DynamicTupleStruct { + fn from(value: DynamicTuple) -> Self { + Self { + represented_type: None, + fields: Box::new(value).drain(), + } + } +} + /// Compares a [`TupleStruct`] with a [`Reflect`] value. 
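To tie the new serialization pieces together, here is a hedged sketch that builds `SerializationData` by hand, roughly the way the derive-generated `as_serialization_data` output above does. It uses only the `SerializationData` and `SkippedField` API introduced in this diff; the reflection index (`2`) and the `-1` default are made up for illustration.

```rust
use bevy_reflect::serde::{SerializationData, SkippedField};
use bevy_reflect::Reflect;

// Hypothetical default for a skipped field at reflection index 2.
fn default_c() -> Box<dyn Reflect> {
    Box::new(-1_i32)
}

fn main() {
    // Roughly equivalent to what `SerializationDataDef::as_serialization_data` emits
    // for a struct whose third reflected field is `#[reflect(skip_serializing)]`.
    let data = SerializationData::new(std::iter::once((2_usize, SkippedField::new(default_c))));

    assert!(data.is_field_skipped(2));
    assert!(!data.is_field_skipped(0));
    assert_eq!(-1, data.generate_default(2).unwrap().take::<i32>().unwrap());
}
```

Deserialization then calls `generate_default` for skipped indices instead of reading a value from the input, which is what the reworked `visit_struct_seq` and `visit_tuple` paths in `de.rs` above rely on.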
/// /// Returns true if and only if all of the following are true: diff --git a/crates/bevy_reflect/src/type_path.rs b/crates/bevy_reflect/src/type_path.rs index 9861f76289b66..99f9b81e6e468 100644 --- a/crates/bevy_reflect/src/type_path.rs +++ b/crates/bevy_reflect/src/type_path.rs @@ -183,6 +183,10 @@ impl fmt::Debug for TypePathTable { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("TypePathVtable") .field("type_path", &self.type_path) + .field("short_type_path", &(self.short_type_path)()) + .field("type_ident", &(self.type_ident)()) + .field("crate_name", &(self.crate_name)()) + .field("module_path", &(self.module_path)()) .finish() } } diff --git a/crates/bevy_render/Cargo.toml b/crates/bevy_render/Cargo.toml index 28622d0f8ada5..9878284469930 100644 --- a/crates/bevy_render/Cargo.toml +++ b/crates/bevy_render/Cargo.toml @@ -62,7 +62,7 @@ codespan-reporting = "0.11.0" # It is enabled for now to avoid having to do a significant overhaul of the renderer just for wasm wgpu = { version = "0.17.1", features = ["naga", "fragile-send-sync-non-atomic-wasm"] } naga = { version = "0.13.0", features = ["wgsl-in"] } -naga_oil = "0.9" +naga_oil = "0.10" serde = { version = "1", features = ["derive"] } bitflags = "2.3" bytemuck = { version = "1.5", features = ["derive"] } diff --git a/crates/bevy_render/macros/src/as_bind_group.rs b/crates/bevy_render/macros/src/as_bind_group.rs index 5602efd8939dc..dca3cea1dfc8d 100644 --- a/crates/bevy_render/macros/src/as_bind_group.rs +++ b/crates/bevy_render/macros/src/as_bind_group.rs @@ -43,7 +43,6 @@ pub fn derive_as_bind_group(ast: syn::DeriveInput) -> Result { let mut binding_states: Vec = Vec::new(); let mut binding_impls = Vec::new(); - let mut bind_group_entries = Vec::new(); let mut binding_layouts = Vec::new(); let mut attr_prepared_data_ident = None; @@ -63,13 +62,16 @@ pub fn derive_as_bind_group(ast: syn::DeriveInput) -> Result { let mut buffer = #render_path::render_resource::encase::UniformBuffer::new(Vec::new()); let converted: #converted_shader_type = self.as_bind_group_shader_type(images); buffer.write(&converted).unwrap(); - #render_path::render_resource::OwnedBindingResource::Buffer(render_device.create_buffer_with_data( - &#render_path::render_resource::BufferInitDescriptor { - label: None, - usage: #render_path::render_resource::BufferUsages::COPY_DST | #render_path::render_resource::BufferUsages::UNIFORM, - contents: buffer.as_ref(), - }, - )) + ( + #binding_index, + #render_path::render_resource::OwnedBindingResource::Buffer(render_device.create_buffer_with_data( + &#render_path::render_resource::BufferInitDescriptor { + label: None, + usage: #render_path::render_resource::BufferUsages::COPY_DST | #render_path::render_resource::BufferUsages::UNIFORM, + contents: buffer.as_ref(), + }, + )) + ) }}); binding_layouts.push(quote!{ @@ -85,14 +87,6 @@ pub fn derive_as_bind_group(ast: syn::DeriveInput) -> Result { } }); - let binding_vec_index = bind_group_entries.len(); - bind_group_entries.push(quote! 
{ - #render_path::render_resource::BindGroupEntry { - binding: #binding_index, - resource: bindings[#binding_vec_index].get_binding(), - } - }); - let required_len = binding_index as usize + 1; if required_len > binding_states.len() { binding_states.resize(required_len, BindingState::Free); @@ -164,13 +158,6 @@ pub fn derive_as_bind_group(ast: syn::DeriveInput) -> Result { _ => { // only populate bind group entries for non-uniforms // uniform entries are deferred until the end - let binding_vec_index = bind_group_entries.len(); - bind_group_entries.push(quote! { - #render_path::render_resource::BindGroupEntry { - binding: #binding_index, - resource: bindings[#binding_vec_index].get_binding(), - } - }); BindingState::Occupied { binding_type, ident: field_name, @@ -230,22 +217,28 @@ pub fn derive_as_bind_group(ast: syn::DeriveInput) -> Result { if buffer { binding_impls.push(quote! { - #render_path::render_resource::OwnedBindingResource::Buffer({ - self.#field_name.clone() - }) + ( + #binding_index, + #render_path::render_resource::OwnedBindingResource::Buffer({ + self.#field_name.clone() + }) + ) }); } else { binding_impls.push(quote! {{ use #render_path::render_resource::AsBindGroupShaderType; let mut buffer = #render_path::render_resource::encase::StorageBuffer::new(Vec::new()); buffer.write(&self.#field_name).unwrap(); - #render_path::render_resource::OwnedBindingResource::Buffer(render_device.create_buffer_with_data( - &#render_path::render_resource::BufferInitDescriptor { - label: None, - usage: #render_path::render_resource::BufferUsages::COPY_DST | #render_path::render_resource::BufferUsages::STORAGE, - contents: buffer.as_ref(), - }, - )) + ( + #binding_index, + #render_path::render_resource::OwnedBindingResource::Buffer(render_device.create_buffer_with_data( + &#render_path::render_resource::BufferInitDescriptor { + label: None, + usage: #render_path::render_resource::BufferUsages::COPY_DST | #render_path::render_resource::BufferUsages::STORAGE, + contents: buffer.as_ref(), + }, + )) + ) }}); } @@ -276,14 +269,17 @@ pub fn derive_as_bind_group(ast: syn::DeriveInput) -> Result { let fallback_image = get_fallback_image(&render_path, *dimension); binding_impls.push(quote! { - #render_path::render_resource::OwnedBindingResource::TextureView({ - let handle: Option<&#asset_path::Handle<#render_path::texture::Image>> = (&self.#field_name).into(); - if let Some(handle) = handle { - images.get(handle).ok_or_else(|| #render_path::render_resource::AsBindGroupError::RetryNextUpdate)?.texture_view.clone() - } else { - #fallback_image.texture_view.clone() - } - }) + ( + #binding_index, + #render_path::render_resource::OwnedBindingResource::TextureView({ + let handle: Option<&#asset_path::Handle<#render_path::texture::Image>> = (&self.#field_name).into(); + if let Some(handle) = handle { + images.get(handle).ok_or_else(|| #render_path::render_resource::AsBindGroupError::RetryNextUpdate)?.texture_view.clone() + } else { + #fallback_image.texture_view.clone() + } + }) + ) }); binding_layouts.push(quote! { @@ -315,14 +311,17 @@ pub fn derive_as_bind_group(ast: syn::DeriveInput) -> Result { let fallback_image = get_fallback_image(&render_path, *dimension); binding_impls.push(quote! 
{ - #render_path::render_resource::OwnedBindingResource::Sampler({ - let handle: Option<&#asset_path::Handle<#render_path::texture::Image>> = (&self.#field_name).into(); - if let Some(handle) = handle { - images.get(handle).ok_or_else(|| #render_path::render_resource::AsBindGroupError::RetryNextUpdate)?.sampler.clone() - } else { - #fallback_image.sampler.clone() - } - }) + ( + #binding_index, + #render_path::render_resource::OwnedBindingResource::Sampler({ + let handle: Option<&#asset_path::Handle<#render_path::texture::Image>> = (&self.#field_name).into(); + if let Some(handle) = handle { + images.get(handle).ok_or_else(|| #render_path::render_resource::AsBindGroupError::RetryNextUpdate)?.sampler.clone() + } else { + #fallback_image.sampler.clone() + } + }) + ) }); binding_layouts.push(quote!{ @@ -340,17 +339,12 @@ pub fn derive_as_bind_group(ast: syn::DeriveInput) -> Result { // Produce impls for fields with uniform bindings let struct_name = &ast.ident; + let struct_name_literal = struct_name.to_string(); + let struct_name_literal = struct_name_literal.as_str(); let mut field_struct_impls = Vec::new(); for (binding_index, binding_state) in binding_states.iter().enumerate() { let binding_index = binding_index as u32; if let BindingState::OccupiedMergeableUniform { uniform_fields } = binding_state { - let binding_vec_index = bind_group_entries.len(); - bind_group_entries.push(quote! { - #render_path::render_resource::BindGroupEntry { - binding: #binding_index, - resource: bindings[#binding_vec_index].get_binding(), - } - }); // single field uniform bindings for a given index can use a straightforward binding if uniform_fields.len() == 1 { let field = &uniform_fields[0]; @@ -359,13 +353,16 @@ pub fn derive_as_bind_group(ast: syn::DeriveInput) -> Result { binding_impls.push(quote! 
{{ let mut buffer = #render_path::render_resource::encase::UniformBuffer::new(Vec::new()); buffer.write(&self.#field_name).unwrap(); - #render_path::render_resource::OwnedBindingResource::Buffer(render_device.create_buffer_with_data( - &#render_path::render_resource::BufferInitDescriptor { - label: None, - usage: #render_path::render_resource::BufferUsages::COPY_DST | #render_path::render_resource::BufferUsages::UNIFORM, - contents: buffer.as_ref(), - }, - )) + ( + #binding_index, + #render_path::render_resource::OwnedBindingResource::Buffer(render_device.create_buffer_with_data( + &#render_path::render_resource::BufferInitDescriptor { + label: None, + usage: #render_path::render_resource::BufferUsages::COPY_DST | #render_path::render_resource::BufferUsages::UNIFORM, + contents: buffer.as_ref(), + }, + )) + ) }}); binding_layouts.push(quote!{ @@ -402,13 +399,16 @@ pub fn derive_as_bind_group(ast: syn::DeriveInput) -> Result { buffer.write(&#uniform_struct_name { #(#field_name: &self.#field_name,)* }).unwrap(); - #render_path::render_resource::OwnedBindingResource::Buffer(render_device.create_buffer_with_data( - &#render_path::render_resource::BufferInitDescriptor { - label: None, - usage: #render_path::render_resource::BufferUsages::COPY_DST | #render_path::render_resource::BufferUsages::UNIFORM, - contents: buffer.as_ref(), - }, - )) + ( + #binding_index, + #render_path::render_resource::OwnedBindingResource::Buffer(render_device.create_buffer_with_data( + &#render_path::render_resource::BufferInitDescriptor { + label: None, + usage: #render_path::render_resource::BufferUsages::COPY_DST | #render_path::render_resource::BufferUsages::UNIFORM, + contents: buffer.as_ref(), + }, + )) + ) }}); binding_layouts.push(quote!{ @@ -443,36 +443,28 @@ pub fn derive_as_bind_group(ast: syn::DeriveInput) -> Result { impl #impl_generics #render_path::render_resource::AsBindGroup for #struct_name #ty_generics #where_clause { type Data = #prepared_data; - fn as_bind_group( + + fn label() -> Option<&'static str> { + Some(#struct_name_literal) + } + + fn unprepared_bind_group( &self, layout: &#render_path::render_resource::BindGroupLayout, render_device: &#render_path::renderer::RenderDevice, images: &#render_path::render_asset::RenderAssets<#render_path::texture::Image>, fallback_image: &#render_path::texture::FallbackImage, - ) -> Result<#render_path::render_resource::PreparedBindGroup, #render_path::render_resource::AsBindGroupError> { + ) -> Result<#render_path::render_resource::UnpreparedBindGroup, #render_path::render_resource::AsBindGroupError> { let bindings = vec![#(#binding_impls,)*]; - let bind_group = { - let descriptor = #render_path::render_resource::BindGroupDescriptor { - entries: &[#(#bind_group_entries,)*], - label: None, - layout: &layout, - }; - render_device.create_bind_group(&descriptor) - }; - - Ok(#render_path::render_resource::PreparedBindGroup { + Ok(#render_path::render_resource::UnpreparedBindGroup { bindings, - bind_group, data: #get_prepared_data, }) } - fn bind_group_layout(render_device: &#render_path::renderer::RenderDevice) -> #render_path::render_resource::BindGroupLayout { - render_device.create_bind_group_layout(&#render_path::render_resource::BindGroupLayoutDescriptor { - entries: &[#(#binding_layouts,)*], - label: None, - }) + fn bind_group_layout_entries(render_device: &#render_path::renderer::RenderDevice) -> Vec<#render_path::render_resource::BindGroupLayoutEntry> { + vec![#(#binding_layouts,)*] } } })) diff --git a/crates/bevy_render/src/camera/camera.rs 
b/crates/bevy_render/src/camera/camera.rs index 5892b9a0818eb..c38757e93ed66 100644 --- a/crates/bevy_render/src/camera/camera.rs +++ b/crates/bevy_render/src/camera/camera.rs @@ -27,7 +27,7 @@ use bevy_window::{ NormalizedWindowRef, PrimaryWindow, Window, WindowCreated, WindowRef, WindowResized, }; use std::{borrow::Cow, ops::Range}; -use wgpu::{BlendState, Extent3d, LoadOp, TextureFormat}; +use wgpu::{BlendState, LoadOp, TextureFormat}; use super::Projection; @@ -511,9 +511,8 @@ impl NormalizedRenderTarget { }), NormalizedRenderTarget::Image(image_handle) => { let image = images.get(image_handle)?; - let Extent3d { width, height, .. } = image.texture_descriptor.size; Some(RenderTargetInfo { - physical_size: UVec2::new(width, height), + physical_size: image.size(), scale_factor: 1.0, }) } diff --git a/crates/bevy_render/src/extract_resource.rs b/crates/bevy_render/src/extract_resource.rs index 4233ef61b5d61..37a21f45bf84c 100644 --- a/crates/bevy_render/src/extract_resource.rs +++ b/crates/bevy_render/src/extract_resource.rs @@ -40,24 +40,26 @@ impl Plugin for ExtractResourcePlugin { /// This system extracts the resource of the corresponding [`Resource`] type pub fn extract_resource( mut commands: Commands, - main_resource: Extract>, + main_resource: Extract>>, target_resource: Option>, #[cfg(debug_assertions)] mut has_warned_on_remove: Local, ) { - if let Some(mut target_resource) = target_resource { - if main_resource.is_changed() { - *target_resource = R::extract_resource(&main_resource); - } - } else { - #[cfg(debug_assertions)] - if !main_resource.is_added() && !*has_warned_on_remove { - *has_warned_on_remove = true; - bevy_log::warn!( - "Removing resource {} from render world not expected, adding using `Commands`. + if let Some(main_resource) = main_resource.as_ref() { + if let Some(mut target_resource) = target_resource { + if main_resource.is_changed() { + *target_resource = R::extract_resource(main_resource); + } + } else { + #[cfg(debug_assertions)] + if !main_resource.is_added() && !*has_warned_on_remove { + *has_warned_on_remove = true; + bevy_log::warn!( + "Removing resource {} from render world not expected, adding using `Commands`. This may decrease performance", - std::any::type_name::() - ); + std::any::type_name::() + ); + } + commands.insert_resource(R::extract_resource(main_resource)); } - commands.insert_resource(R::extract_resource(&main_resource)); } } diff --git a/crates/bevy_render/src/globals.rs b/crates/bevy_render/src/globals.rs index 3ceed24476c75..d337be44b93bc 100644 --- a/crates/bevy_render/src/globals.rs +++ b/crates/bevy_render/src/globals.rs @@ -39,7 +39,7 @@ fn extract_frame_count(mut commands: Commands, frame_count: Extract>) { - commands.insert_resource(time.clone()); + commands.insert_resource(**time); } /// Contains global values useful when writing shaders. diff --git a/crates/bevy_render/src/lib.rs b/crates/bevy_render/src/lib.rs index 0011101740e45..7f013c0a4fb28 100644 --- a/crates/bevy_render/src/lib.rs +++ b/crates/bevy_render/src/lib.rs @@ -92,19 +92,19 @@ pub enum RenderSet { /// Queue drawable entities as phase items in [`RenderPhase`](crate::render_phase::RenderPhase)s /// ready for sorting Queue, - /// A sub-set within Queue where mesh entity queue systems are executed. Ensures `prepare_assets::` is completed. + /// A sub-set within [`Queue`](RenderSet::Queue) where mesh entity queue systems are executed. Ensures `prepare_assets::` is completed. 
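Returning briefly to the `extract_resource` change above: the system now reads the main-world resource through an `Option` and only touches the render world when the resource actually exists. A reduced sketch of that control flow, detached from Bevy's `Extract`, `Res`, and `Commands` machinery (the function signature here is illustrative only):

```rust
/// Reduced stand-in for the `extract_resource` system above (illustrative only).
fn extract<R: Clone>(
    main_resource: Option<&R>,
    main_resource_changed: bool,
    render_resource: &mut Option<R>,
) {
    // If the main world no longer has `R`, leave the render-world copy untouched.
    let Some(main_resource) = main_resource else { return };

    match render_resource {
        // Already extracted: refresh only when the main-world value changed.
        Some(existing) if main_resource_changed => *existing = main_resource.clone(),
        Some(_) => {}
        // No render-world copy yet (or it was removed): insert a fresh one.
        // The real system also warns once, in debug builds, when that removal was unexpected.
        None => *render_resource = Some(main_resource.clone()),
    }
}

fn main() {
    let mut render_copy: Option<u32> = None;
    extract(Some(&5_u32), true, &mut render_copy);
    assert_eq!(Some(5), render_copy);

    extract(None, false, &mut render_copy); // main-world resource was removed
    assert_eq!(Some(5), render_copy); // render-world copy is left as-is
}
```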
QueueMeshes, - // TODO: This could probably be moved in favor of a system ordering abstraction in Render or Queue + // TODO: This could probably be moved in favor of a system ordering abstraction in `Render` or `Queue` /// Sort the [`RenderPhases`](render_phase::RenderPhase) here. PhaseSort, /// Prepare render resources from extracted data for the GPU based on their sorted order. /// Create [`BindGroups`](crate::render_resource::BindGroup) that depend on those data. Prepare, - /// A sub-set within Prepare for initializing buffers, textures and uniforms for use in bind groups. + /// A sub-set within [`Prepare`](RenderSet::Prepare) for initializing buffers, textures and uniforms for use in bind groups. PrepareResources, /// The copy of [`apply_deferred`] that runs between [`PrepareResources`](RenderSet::PrepareResources) and ['PrepareBindGroups'](RenderSet::PrepareBindGroups). PrepareResourcesFlush, - /// A sub-set within Prepare for constructing bind groups, or other data that relies on render resources prepared in [`PrepareResources`](RenderSet::PrepareResources). + /// A sub-set within [`Prepare`](RenderSet::Prepare) for constructing bind groups, or other data that relies on render resources prepared in [`PrepareResources`](RenderSet::PrepareResources). PrepareBindGroups, /// The copy of [`apply_deferred`] that runs immediately after [`Prepare`](RenderSet::Prepare). PrepareFlush, @@ -127,7 +127,7 @@ impl Render { /// Sets up the base structure of the rendering [`Schedule`]. /// /// The sets defined in this enum are configured to run in order, - /// and a copy of [`apply_deferred`] is inserted into each `*Flush` set. + /// and a copy of [`apply_deferred`] is inserted into each [`*Flush` set](RenderSet). pub fn base_schedule() -> Schedule { use RenderSet::*; diff --git a/crates/bevy_render/src/render_resource/bind_group.rs b/crates/bevy_render/src/render_resource/bind_group.rs index 741a719ae5b96..8ee876b9c5208 100644 --- a/crates/bevy_render/src/render_resource/bind_group.rs +++ b/crates/bevy_render/src/render_resource/bind_group.rs @@ -9,7 +9,7 @@ use crate::{ pub use bevy_render_macros::AsBindGroup; use encase::ShaderType; use std::ops::Deref; -use wgpu::BindingResource; +use wgpu::{BindGroupEntry, BindGroupLayoutDescriptor, BindGroupLayoutEntry, BindingResource}; define_atomic_id!(BindGroupId); render_resource_wrapper!(ErasedBindGroup, wgpu::BindGroup); @@ -262,6 +262,11 @@ pub trait AsBindGroup { /// Data that will be stored alongside the "prepared" bind group. type Data: Send + Sync; + /// label + fn label() -> Option<&'static str> { + None + } + /// Creates a bind group for `self` matching the layout defined in [`AsBindGroup::bind_group_layout`]. fn as_bind_group( &self, @@ -269,10 +274,52 @@ pub trait AsBindGroup { render_device: &RenderDevice, images: &RenderAssets, fallback_image: &FallbackImage, - ) -> Result, AsBindGroupError>; + ) -> Result, AsBindGroupError> { + let UnpreparedBindGroup { bindings, data } = + Self::unprepared_bind_group(self, layout, render_device, images, fallback_image)?; + + let entries = bindings + .iter() + .map(|(index, binding)| BindGroupEntry { + binding: *index, + resource: binding.get_binding(), + }) + .collect::>(); + + let bind_group = render_device.create_bind_group(Self::label(), layout, &entries); + + Ok(PreparedBindGroup { + bindings, + bind_group, + data, + }) + } + + /// Returns a vec of (binding index, `OwnedBindingResource`). 
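The `AsBindGroup` reshuffle above amounts to this: implementors now hand back plain `(binding index, resource)` pairs from `unprepared_bind_group`, and the trait's new default `as_bind_group` turns those pairs into `BindGroupEntry` values itself before calling `create_bind_group`. A dependency-free sketch of that mapping step; `Entry` and `Resource` are stand-ins for the wgpu types, not the real API:

```rust
// Stand-ins for `wgpu::BindGroupEntry` / `BindingResource` (illustrative only).
#[derive(Debug, Clone, PartialEq)]
enum Resource {
    Buffer(&'static str),
    TextureView(&'static str),
}

#[derive(Debug, PartialEq)]
struct Entry {
    binding: u32,
    resource: Resource,
}

/// What the new default `as_bind_group` does with the pairs returned by
/// `unprepared_bind_group` before handing them to `create_bind_group`.
fn entries_from_unprepared(bindings: &[(u32, Resource)]) -> Vec<Entry> {
    bindings
        .iter()
        .map(|(index, resource)| Entry {
            binding: *index,
            resource: resource.clone(),
        })
        .collect()
}

fn main() {
    let unprepared = [(0, Resource::Buffer("uniform")), (2, Resource::TextureView("albedo"))];
    let entries = entries_from_unprepared(&unprepared);
    assert_eq!(entries[1], Entry { binding: 2, resource: Resource::TextureView("albedo") });
}
```

The derive macro's per-field `binding_impls` shown earlier produce exactly such pairs, which is why the hand-rolled `bind_group_entries` bookkeeping could be deleted.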
+ /// In cases where `OwnedBindingResource` is not available (as for bindless texture arrays currently), + /// an implementor may define `as_bind_group` directly. This may prevent certain features + /// from working correctly. + fn unprepared_bind_group( + &self, + layout: &BindGroupLayout, + render_device: &RenderDevice, + images: &RenderAssets, + fallback_image: &FallbackImage, + ) -> Result, AsBindGroupError>; /// Creates the bind group layout matching all bind groups returned by [`AsBindGroup::as_bind_group`] fn bind_group_layout(render_device: &RenderDevice) -> BindGroupLayout + where + Self: Sized, + { + render_device.create_bind_group_layout(&BindGroupLayoutDescriptor { + label: Self::label(), + entries: &Self::bind_group_layout_entries(render_device), + }) + } + + /// Returns a vec of bind group layout entries + fn bind_group_layout_entries(render_device: &RenderDevice) -> Vec where Self: Sized; } @@ -285,14 +332,21 @@ pub enum AsBindGroupError { /// A prepared bind group returned as a result of [`AsBindGroup::as_bind_group`]. pub struct PreparedBindGroup { - pub bindings: Vec, + pub bindings: Vec<(u32, OwnedBindingResource)>, pub bind_group: BindGroup, pub data: T, } +/// a map containing `OwnedBindingResource`s, keyed by the target binding index +pub struct UnpreparedBindGroup { + pub bindings: Vec<(u32, OwnedBindingResource)>, + pub data: T, +} + /// An owned binding resource of any type (ex: a [`Buffer`], [`TextureView`], etc). /// This is used by types like [`PreparedBindGroup`] to hold a single list of all /// render resources used by bindings. +#[derive(Debug)] pub enum OwnedBindingResource { Buffer(Buffer), TextureView(TextureView), diff --git a/crates/bevy_render/src/render_resource/bind_group_entries.rs b/crates/bevy_render/src/render_resource/bind_group_entries.rs new file mode 100644 index 0000000000000..09336eeb0a093 --- /dev/null +++ b/crates/bevy_render/src/render_resource/bind_group_entries.rs @@ -0,0 +1,282 @@ +use bevy_utils::all_tuples_with_size; +use wgpu::{BindGroupEntry, BindingResource}; + +use super::{Sampler, TextureView}; + +/// Helper for constructing bindgroups. 
+/// +/// Allows constructing the descriptor's entries as: +/// ```ignore +/// render_device.create_bind_group( +/// "my_bind_group", +/// &my_layout, +/// &BindGroupEntries::with_indices(( +/// (2, &my_sampler), +/// (3, my_uniform), +/// )), +/// ); +/// ``` +/// +/// instead of +/// +/// ```ignore +/// render_device.create_bind_group( +/// "my_bind_group", +/// &my_layout, +/// &[ +/// BindGroupEntry { +/// binding: 2, +/// resource: BindingResource::Sampler(&my_sampler), +/// }, +/// BindGroupEntry { +/// binding: 3, +/// resource: my_uniform, +/// }, +/// ], +/// ); +/// ``` +/// +/// or +/// +/// ```ignore +/// render_device.create_bind_group( +/// "my_bind_group", +/// &my_layout, +/// &BindGroupEntries::sequential(( +/// &my_sampler, +/// my_uniform, +/// )), +/// ); +/// ``` +/// +/// instead of +/// +/// ```ignore +/// render_device.create_bind_group( +/// "my_bind_group", +/// &my_layout, +/// &[ +/// BindGroupEntry { +/// binding: 0, +/// resource: BindingResource::Sampler(&my_sampler), +/// }, +/// BindGroupEntry { +/// binding: 1, +/// resource: my_uniform, +/// }, +/// ], +/// ); +/// ``` +/// +/// or +/// +/// ```ignore +/// render_device.create_bind_group( +/// "my_bind_group", +/// &my_layout, +/// &BindGroupEntries::single(my_uniform), +/// ); +/// ``` +/// +/// instead of +/// +/// ```ignore +/// render_device.create_bind_group( +/// "my_bind_group", +/// &my_layout, +/// &[ +/// BindGroupEntry { +/// binding: 0, +/// resource: my_uniform, +/// }, +/// ], +/// ); +/// ``` + +pub struct BindGroupEntries<'b, const N: usize = 1> { + entries: [BindGroupEntry<'b>; N], +} + +impl<'b, const N: usize> BindGroupEntries<'b, N> { + #[inline] + pub fn sequential(resources: impl IntoBindingArray<'b, N>) -> Self { + let mut i = 0; + Self { + entries: resources.into_array().map(|resource| { + let binding = i; + i += 1; + BindGroupEntry { binding, resource } + }), + } + } + + #[inline] + pub fn with_indices(indexed_resources: impl IntoIndexedBindingArray<'b, N>) -> Self { + Self { + entries: indexed_resources + .into_array() + .map(|(binding, resource)| BindGroupEntry { binding, resource }), + } + } +} + +impl<'b> BindGroupEntries<'b, 1> { + pub fn single(resource: impl IntoBinding<'b>) -> [BindGroupEntry<'b>; 1] { + [BindGroupEntry { + binding: 0, + resource: resource.into_binding(), + }] + } +} + +impl<'b, const N: usize> std::ops::Deref for BindGroupEntries<'b, N> { + type Target = [BindGroupEntry<'b>]; + + fn deref(&self) -> &[BindGroupEntry<'b>] { + &self.entries + } +} + +pub trait IntoBinding<'a> { + fn into_binding(self) -> BindingResource<'a>; +} + +impl<'a> IntoBinding<'a> for &'a TextureView { + #[inline] + fn into_binding(self) -> BindingResource<'a> { + BindingResource::TextureView(self) + } +} + +impl<'a> IntoBinding<'a> for &'a [&'a wgpu::TextureView] { + #[inline] + fn into_binding(self) -> BindingResource<'a> { + BindingResource::TextureViewArray(self) + } +} + +impl<'a> IntoBinding<'a> for &'a Sampler { + #[inline] + fn into_binding(self) -> BindingResource<'a> { + BindingResource::Sampler(self) + } +} + +impl<'a> IntoBinding<'a> for BindingResource<'a> { + #[inline] + fn into_binding(self) -> BindingResource<'a> { + self + } +} + +impl<'a> IntoBinding<'a> for wgpu::BufferBinding<'a> { + #[inline] + fn into_binding(self) -> BindingResource<'a> { + BindingResource::Buffer(self) + } +} + +pub trait IntoBindingArray<'b, const N: usize> { + fn into_array(self) -> [BindingResource<'b>; N]; +} + +macro_rules! 
impl_to_binding_slice { + ($N: expr, $(($T: ident, $I: ident)),*) => { + impl<'b, $($T: IntoBinding<'b>),*> IntoBindingArray<'b, $N> for ($($T,)*) { + #[inline] + fn into_array(self) -> [BindingResource<'b>; $N] { + let ($($I,)*) = self; + [$($I.into_binding(), )*] + } + } + } +} + +all_tuples_with_size!(impl_to_binding_slice, 1, 32, T, s); + +pub trait IntoIndexedBindingArray<'b, const N: usize> { + fn into_array(self) -> [(u32, BindingResource<'b>); N]; +} + +macro_rules! impl_to_indexed_binding_slice { + ($N: expr, $(($T: ident, $S: ident, $I: ident)),*) => { + impl<'b, $($T: IntoBinding<'b>),*> IntoIndexedBindingArray<'b, $N> for ($((u32, $T),)*) { + #[inline] + fn into_array(self) -> [(u32, BindingResource<'b>); $N] { + let ($(($S, $I),)*) = self; + [$(($S, $I.into_binding())), *] + } + } + } +} + +all_tuples_with_size!(impl_to_indexed_binding_slice, 1, 32, T, n, s); + +pub struct DynamicBindGroupEntries<'b> { + entries: Vec>, +} + +impl<'b> DynamicBindGroupEntries<'b> { + pub fn sequential(entries: impl IntoBindingArray<'b, N>) -> Self { + Self { + entries: entries + .into_array() + .into_iter() + .enumerate() + .map(|(ix, resource)| BindGroupEntry { + binding: ix as u32, + resource, + }) + .collect(), + } + } + + pub fn extend_sequential( + mut self, + entries: impl IntoBindingArray<'b, N>, + ) -> Self { + let start = self.entries.last().unwrap().binding + 1; + self.entries.extend( + entries + .into_array() + .into_iter() + .enumerate() + .map(|(ix, resource)| BindGroupEntry { + binding: start + ix as u32, + resource, + }), + ); + self + } + + pub fn new_with_indices(entries: impl IntoIndexedBindingArray<'b, N>) -> Self { + Self { + entries: entries + .into_array() + .into_iter() + .map(|(binding, resource)| BindGroupEntry { binding, resource }) + .collect(), + } + } + + pub fn extend_with_indices( + mut self, + entries: impl IntoIndexedBindingArray<'b, N>, + ) -> Self { + self.entries.extend( + entries + .into_array() + .into_iter() + .map(|(binding, resource)| BindGroupEntry { binding, resource }), + ); + self + } +} + +impl<'b> std::ops::Deref for DynamicBindGroupEntries<'b> { + type Target = [BindGroupEntry<'b>]; + + fn deref(&self) -> &[BindGroupEntry<'b>] { + &self.entries + } +} diff --git a/crates/bevy_render/src/render_resource/mod.rs b/crates/bevy_render/src/render_resource/mod.rs index f16f5f1269929..b7d245b0bdbc9 100644 --- a/crates/bevy_render/src/render_resource/mod.rs +++ b/crates/bevy_render/src/render_resource/mod.rs @@ -1,5 +1,6 @@ mod batched_uniform_buffer; mod bind_group; +mod bind_group_entries; mod bind_group_layout; mod buffer; mod buffer_vec; @@ -14,6 +15,7 @@ mod texture; mod uniform_buffer; pub use bind_group::*; +pub use bind_group_entries::*; pub use bind_group_layout::*; pub use buffer::*; pub use buffer_vec::*; diff --git a/crates/bevy_render/src/render_resource/uniform_buffer.rs b/crates/bevy_render/src/render_resource/uniform_buffer.rs index 3ecd692ba2880..7e1b86869c4e0 100644 --- a/crates/bevy_render/src/render_resource/uniform_buffer.rs +++ b/crates/bevy_render/src/render_resource/uniform_buffer.rs @@ -13,6 +13,8 @@ use wgpu::{ util::BufferInitDescriptor, BindingResource, BufferBinding, BufferDescriptor, BufferUsages, }; +use super::IntoBinding; + /// Stores data to be transferred to the GPU and made accessible to shaders as a uniform buffer. /// /// Uniform buffers are available to shaders on a read-only basis. 
Uniform buffers are commonly used to make available to shaders @@ -139,6 +141,16 @@ impl UniformBuffer { } } +impl<'a, T: ShaderType + WriteInto> IntoBinding<'a> for &'a UniformBuffer { + #[inline] + fn into_binding(self) -> BindingResource<'a> { + self.buffer() + .expect("Failed to get buffer") + .as_entire_buffer_binding() + .into_binding() + } +} + /// Stores data to be transferred to the GPU and made accessible to shaders as a dynamic uniform buffer. /// /// Dynamic uniform buffers are available to shaders on a read-only basis. Dynamic uniform buffers are commonly used to make @@ -265,8 +277,16 @@ impl DynamicUniformBuffer { device: &RenderDevice, queue: &'a RenderQueue, ) -> Option> { - let alignment = - AlignmentValue::new(device.limits().min_uniform_buffer_offset_alignment as u64); + let alignment = if cfg!(ios_simulator) { + // On iOS simulator on silicon macs, metal validation check that the host OS alignment + // is respected, but the device reports the correct value for iOS, which is smaller. + // Use the larger value. + // See https://github.com/bevyengine/bevy/pull/10178 - remove if it's not needed anymore. + AlignmentValue::new(256) + } else { + AlignmentValue::new(device.limits().min_uniform_buffer_offset_alignment as u64) + }; + let mut capacity = self.buffer.as_deref().map(wgpu::Buffer::size).unwrap_or(0); let size = alignment .round_up(T::min_size().get()) @@ -367,3 +387,10 @@ impl<'a> BufferMut for QueueWriteBufferViewWrapper<'a> { self.buffer_view.write(offset, val); } } + +impl<'a, T: ShaderType + WriteInto> IntoBinding<'a> for &'a DynamicUniformBuffer { + #[inline] + fn into_binding(self) -> BindingResource<'a> { + self.binding().unwrap() + } +} diff --git a/crates/bevy_render/src/renderer/render_device.rs b/crates/bevy_render/src/renderer/render_device.rs index 8a177e94774cb..6a126df8aa41e 100644 --- a/crates/bevy_render/src/renderer/render_device.rs +++ b/crates/bevy_render/src/renderer/render_device.rs @@ -3,7 +3,9 @@ use crate::render_resource::{ RenderPipeline, Sampler, Texture, }; use bevy_ecs::system::Resource; -use wgpu::{util::DeviceExt, BufferAsyncError, BufferBindingType}; +use wgpu::{ + util::DeviceExt, BindGroupDescriptor, BindGroupEntry, BufferAsyncError, BufferBindingType, +}; use super::RenderQueue; @@ -82,8 +84,17 @@ impl RenderDevice { /// Creates a new [`BindGroup`](wgpu::BindGroup). #[inline] - pub fn create_bind_group(&self, desc: &wgpu::BindGroupDescriptor) -> BindGroup { - let wgpu_bind_group = self.device.create_bind_group(desc); + pub fn create_bind_group<'a>( + &self, + label: impl Into>, + layout: &'a BindGroupLayout, + entries: &'a [BindGroupEntry<'a>], + ) -> BindGroup { + let wgpu_bind_group = self.device.create_bind_group(&BindGroupDescriptor { + label: label.into(), + layout, + entries, + }); BindGroup::from(wgpu_bind_group) } diff --git a/crates/bevy_render/src/texture/compressed_image_saver.rs b/crates/bevy_render/src/texture/compressed_image_saver.rs index 26ab7d22785ce..a557447db3d46 100644 --- a/crates/bevy_render/src/texture/compressed_image_saver.rs +++ b/crates/bevy_render/src/texture/compressed_image_saver.rs @@ -40,7 +40,7 @@ impl AssetSaver for CompressedImageSaver { let mut source_image = compressor_params.source_image_mut(0); let size = image.size(); - source_image.init(&image.data, size.x as u32, size.y as u32, 4); + source_image.init(&image.data, size.x, size.y, 4); let mut compressor = basis_universal::Compressor::new(4); // SAFETY: the CompressorParams are "valid" to the best of our knowledge. 
The basis-universal diff --git a/crates/bevy_render/src/texture/dds.rs b/crates/bevy_render/src/texture/dds.rs index 6f773193ab9a0..16f1aa7240f97 100644 --- a/crates/bevy_render/src/texture/dds.rs +++ b/crates/bevy_render/src/texture/dds.rs @@ -1,6 +1,8 @@ -use ddsfile::{D3DFormat, Dds, DxgiFormat}; +use ddsfile::{Caps2, D3DFormat, Dds, DxgiFormat}; use std::io::Cursor; -use wgpu::{Extent3d, TextureDimension, TextureFormat}; +use wgpu::{ + Extent3d, TextureDimension, TextureFormat, TextureViewDescriptor, TextureViewDimension, +}; use super::{CompressedImageFormats, Image, TextureError}; @@ -18,14 +20,29 @@ pub fn dds_buffer_to_image( ))); } let mut image = Image::default(); + let is_cubemap = dds.header.caps2.contains(Caps2::CUBEMAP); + let mut depth_or_array_layers = if dds.get_num_array_layers() > 1 { + dds.get_num_array_layers() + } else { + dds.get_depth() + }; + if is_cubemap { + if !dds.header.caps2.contains( + Caps2::CUBEMAP_NEGATIVEX + | Caps2::CUBEMAP_NEGATIVEY + | Caps2::CUBEMAP_NEGATIVEZ + | Caps2::CUBEMAP_POSITIVEX + | Caps2::CUBEMAP_POSITIVEY + | Caps2::CUBEMAP_POSITIVEZ, + ) { + return Err(TextureError::IncompleteCubemap); + } + depth_or_array_layers *= 6; + } image.texture_descriptor.size = Extent3d { width: dds.get_width(), height: dds.get_height(), - depth_or_array_layers: if dds.get_num_array_layers() > 1 { - dds.get_num_array_layers() - } else { - dds.get_depth() - }, + depth_or_array_layers, } .physical_size(texture_format); image.texture_descriptor.mip_level_count = dds.get_num_mipmap_levels(); @@ -37,6 +54,17 @@ pub fn dds_buffer_to_image( } else { TextureDimension::D1 }; + if is_cubemap { + let dimension = if image.texture_descriptor.size.depth_or_array_layers > 6 { + TextureViewDimension::CubeArray + } else { + TextureViewDimension::Cube + }; + image.texture_view_descriptor = Some(TextureViewDescriptor { + dimension: Some(dimension), + ..Default::default() + }); + } image.data = dds.data; Ok(image) } diff --git a/crates/bevy_render/src/texture/fallback_image.rs b/crates/bevy_render/src/texture/fallback_image.rs index 4075963a614a2..923a0247e6cf7 100644 --- a/crates/bevy_render/src/texture/fallback_image.rs +++ b/crates/bevy_render/src/texture/fallback_image.rs @@ -4,7 +4,6 @@ use bevy_ecs::{ prelude::{FromWorld, Res, ResMut}, system::{Resource, SystemParam}, }; -use bevy_math::Vec2; use bevy_utils::HashMap; use wgpu::{Extent3d, TextureFormat}; @@ -103,17 +102,14 @@ fn fallback_image_new( }); let sampler = match image.sampler_descriptor { ImageSampler::Default => (**default_sampler).clone(), - ImageSampler::Descriptor(descriptor) => render_device.create_sampler(&descriptor), + ImageSampler::Descriptor(ref descriptor) => render_device.create_sampler(descriptor), }; GpuImage { texture, texture_view, texture_format: image.texture_descriptor.format, sampler, - size: Vec2::new( - image.texture_descriptor.size.width as f32, - image.texture_descriptor.size.height as f32, - ), + size: image.size_f32(), mip_level_count: image.texture_descriptor.mip_level_count, } } diff --git a/crates/bevy_render/src/texture/image.rs b/crates/bevy_render/src/texture/image.rs index d14388d87504c..b90ec82ce431a 100644 --- a/crates/bevy_render/src/texture/image.rs +++ b/crates/bevy_render/src/texture/image.rs @@ -14,7 +14,7 @@ use crate::{ use bevy_asset::Asset; use bevy_derive::{Deref, DerefMut}; use bevy_ecs::system::{lifetimeless::SRes, Resource, SystemParamItem}; -use bevy_math::Vec2; +use bevy_math::{UVec2, Vec2}; use bevy_reflect::Reflect; use serde::{Deserialize, Serialize}; use 
std::hash::Hash; @@ -254,17 +254,34 @@ impl Image { value } + /// Returns the width of a 2D image. + #[inline] + pub fn width(&self) -> u32 { + self.texture_descriptor.size.width + } + + /// Returns the height of a 2D image. + #[inline] + pub fn height(&self) -> u32 { + self.texture_descriptor.size.height + } + /// Returns the aspect ratio (height/width) of a 2D image. - pub fn aspect_2d(&self) -> f32 { - self.texture_descriptor.size.height as f32 / self.texture_descriptor.size.width as f32 + #[inline] + pub fn aspect_ratio(&self) -> f32 { + self.height() as f32 / self.width() as f32 + } + + /// Returns the size of a 2D image as f32. + #[inline] + pub fn size_f32(&self) -> Vec2 { + Vec2::new(self.width() as f32, self.height() as f32) } /// Returns the size of a 2D image. - pub fn size(&self) -> Vec2 { - Vec2::new( - self.texture_descriptor.size.width as f32, - self.texture_descriptor.size.height as f32, - ) + #[inline] + pub fn size(&self) -> UVec2 { + UVec2::new(self.width(), self.height()) } /// Resizes the image to the new size, by removing information or appending 0 to the `data`. @@ -304,11 +321,11 @@ impl Image { // Must be a stacked image, and the height must be divisible by layers. assert!(self.texture_descriptor.dimension == TextureDimension::D2); assert!(self.texture_descriptor.size.depth_or_array_layers == 1); - assert_eq!(self.texture_descriptor.size.height % layers, 0); + assert_eq!(self.height() % layers, 0); self.reinterpret_size(Extent3d { - width: self.texture_descriptor.size.width, - height: self.texture_descriptor.size.height / layers, + width: self.width(), + height: self.height() / layers, depth_or_array_layers: layers, }); } @@ -438,6 +455,9 @@ pub enum TextureError { TranscodeError(String), #[error("format requires transcoding: {0:?}")] FormatRequiresTranscodingError(TranscodeFormat), + /// Only cubemaps with six faces are supported. + #[error("only cubemaps with six faces are supported")] + IncompleteCubemap, } /// The type of a raw image buffer. @@ -633,12 +653,12 @@ mod test { ); assert_eq!( Vec2::new(size.width as f32, size.height as f32), - image.size() + image.size_f32() ); } #[test] fn image_default_size() { let image = Image::default(); - assert_eq!(Vec2::ONE, image.size()); + assert_eq!(Vec2::ONE, image.size_f32()); } } diff --git a/crates/bevy_render/src/texture/image_texture_conversion.rs b/crates/bevy_render/src/texture/image_texture_conversion.rs index 6fce5e5ecf1a4..298c39219c0cc 100644 --- a/crates/bevy_render/src/texture/image_texture_conversion.rs +++ b/crates/bevy_render/src/texture/image_texture_conversion.rs @@ -165,38 +165,28 @@ impl Image { /// To convert [`Image`] to a different format see: [`Image::convert`]. 
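`Image::size` now returns integer texel dimensions as a `UVec2`, with the new `size_f32` covering the old `Vec2` behaviour and `aspect_2d` renamed to `aspect_ratio`. A minimal sketch of the resulting call sites (the helper function and its names are illustrative only):

```rust
use bevy::math::{UVec2, Vec2};
use bevy::prelude::Image;

fn log_image_size(image: &Image) {
    // `size()` now returns the texel dimensions as integers...
    let size: UVec2 = image.size();
    // ...while `size_f32()` keeps the old `Vec2`-returning behaviour for math-heavy code.
    let size_f32: Vec2 = image.size_f32();
    // `aspect_2d()` is renamed to `aspect_ratio()` and still computes height / width.
    println!("{size} px, {size_f32} as f32, aspect {}", image.aspect_ratio());
}
```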
pub fn try_into_dynamic(self) -> Result { match self.texture_descriptor.format { - TextureFormat::R8Unorm => ImageBuffer::from_raw( - self.texture_descriptor.size.width, - self.texture_descriptor.size.height, - self.data, - ) - .map(DynamicImage::ImageLuma8), - TextureFormat::Rg8Unorm => ImageBuffer::from_raw( - self.texture_descriptor.size.width, - self.texture_descriptor.size.height, - self.data, - ) - .map(DynamicImage::ImageLumaA8), - TextureFormat::Rgba8UnormSrgb => ImageBuffer::from_raw( - self.texture_descriptor.size.width, - self.texture_descriptor.size.height, - self.data, - ) - .map(DynamicImage::ImageRgba8), + TextureFormat::R8Unorm => ImageBuffer::from_raw(self.width(), self.height(), self.data) + .map(DynamicImage::ImageLuma8), + TextureFormat::Rg8Unorm => { + ImageBuffer::from_raw(self.width(), self.height(), self.data) + .map(DynamicImage::ImageLumaA8) + } + TextureFormat::Rgba8UnormSrgb => { + ImageBuffer::from_raw(self.width(), self.height(), self.data) + .map(DynamicImage::ImageRgba8) + } // This format is commonly used as the format for the swapchain texture // This conversion is added here to support screenshots - TextureFormat::Bgra8UnormSrgb | TextureFormat::Bgra8Unorm => ImageBuffer::from_raw( - self.texture_descriptor.size.width, - self.texture_descriptor.size.height, - { + TextureFormat::Bgra8UnormSrgb | TextureFormat::Bgra8Unorm => { + ImageBuffer::from_raw(self.width(), self.height(), { let mut data = self.data; for bgra in data.chunks_exact_mut(4) { bgra.swap(0, 2); } data - }, - ) - .map(DynamicImage::ImageRgba8), + }) + .map(DynamicImage::ImageRgba8) + } // Throw and error if conversion isn't supported texture_format => return Err(IntoDynamicImageError::UnsupportedFormat(texture_format)), } diff --git a/crates/bevy_render/src/view/window/mod.rs b/crates/bevy_render/src/view/window/mod.rs index f402825b96509..31ac4fca2cee3 100644 --- a/crates/bevy_render/src/view/window/mod.rs +++ b/crates/bevy_render/src/view/window/mod.rs @@ -1,5 +1,7 @@ use crate::{ - render_resource::{PipelineCache, SpecializedRenderPipelines, SurfaceTexture, TextureView}, + render_resource::{ + BindGroupEntries, PipelineCache, SpecializedRenderPipelines, SurfaceTexture, TextureView, + }, renderer::{RenderAdapter, RenderDevice, RenderInstance}, texture::TextureFormatPixelInfo, Extract, ExtractSchedule, Render, RenderApp, RenderSet, @@ -344,7 +346,9 @@ pub fn prepare_windows( .enumerate_adapters(wgpu::Backends::VULKAN) .any(|adapter| { let name = adapter.get_info().name; - name.starts_with("AMD") || name.starts_with("Intel") + name.starts_with("Radeon") + || name.starts_with("AMD") + || name.starts_with("Intel") }) }; @@ -411,14 +415,11 @@ pub fn prepare_windows( usage: BufferUsages::MAP_READ | BufferUsages::COPY_DST, mapped_at_creation: false, }); - let bind_group = render_device.create_bind_group(&wgpu::BindGroupDescriptor { - label: Some("screenshot-to-screen-bind-group"), - layout: &screenshot_pipeline.bind_group_layout, - entries: &[wgpu::BindGroupEntry { - binding: 0, - resource: wgpu::BindingResource::TextureView(&texture_view), - }], - }); + let bind_group = render_device.create_bind_group( + "screenshot-to-screen-bind-group", + &screenshot_pipeline.bind_group_layout, + &BindGroupEntries::single(&texture_view), + ); let pipeline_id = pipelines.specialize( &pipeline_cache, &screenshot_pipeline, diff --git a/crates/bevy_scene/src/scene_loader.rs b/crates/bevy_scene/src/scene_loader.rs index 88d6ca8027fb1..5d1e4de56ade4 100644 --- a/crates/bevy_scene/src/scene_loader.rs +++ 
b/crates/bevy_scene/src/scene_loader.rs @@ -29,10 +29,10 @@ impl FromWorld for SceneLoader { #[non_exhaustive] #[derive(Debug, Error)] pub enum SceneLoaderError { - /// An [IO](std::io) Error - #[error("Could load shader: {0}")] + /// An [IO Error](std::io::Error) + #[error("Error while trying to read the scene file: {0}")] Io(#[from] std::io::Error), - /// A [RON](ron) Error + /// A [RON Error](ron::error::SpannedError) #[error("Could not parse RON: {0}")] RonSpannedError(#[from] ron::error::SpannedError), } diff --git a/crates/bevy_sprite/src/dynamic_texture_atlas_builder.rs b/crates/bevy_sprite/src/dynamic_texture_atlas_builder.rs index 18dd13a0ed8ff..fcf19bba537ae 100644 --- a/crates/bevy_sprite/src/dynamic_texture_atlas_builder.rs +++ b/crates/bevy_sprite/src/dynamic_texture_atlas_builder.rs @@ -36,8 +36,8 @@ impl DynamicTextureAtlasBuilder { texture: &Image, ) -> Option { let allocation = self.atlas_allocator.allocate(size2( - texture.texture_descriptor.size.width as i32 + self.padding, - texture.texture_descriptor.size.height as i32 + self.padding, + texture.width() as i32 + self.padding, + texture.height() as i32 + self.padding, )); if let Some(allocation) = allocation { let atlas_texture = textures.get_mut(&texture_atlas.texture).unwrap(); @@ -59,7 +59,7 @@ impl DynamicTextureAtlasBuilder { let mut rect = allocation.rectangle; rect.max.x -= self.padding; rect.max.y -= self.padding; - let atlas_width = atlas_texture.texture_descriptor.size.width as usize; + let atlas_width = atlas_texture.width() as usize; let rect_width = rect.width() as usize; let format_size = atlas_texture.texture_descriptor.format.pixel_size(); diff --git a/crates/bevy_sprite/src/lib.rs b/crates/bevy_sprite/src/lib.rs index 1eb3b1a5cc64c..9f68a9a3b981e 100644 --- a/crates/bevy_sprite/src/lib.rs +++ b/crates/bevy_sprite/src/lib.rs @@ -138,7 +138,7 @@ pub fn calculate_bounds_2d( for (entity, sprite, texture_handle) in &sprites_without_aabb { if let Some(size) = sprite .custom_size - .or_else(|| images.get(texture_handle).map(|image| image.size())) + .or_else(|| images.get(texture_handle).map(|image| image.size_f32())) { let aabb = Aabb { center: (-sprite.anchor.as_vec() * size).extend(0.0).into(), diff --git a/crates/bevy_sprite/src/mesh2d/color_material.wgsl b/crates/bevy_sprite/src/mesh2d/color_material.wgsl index 6f125a83b18ba..1ed5d75341b0c 100644 --- a/crates/bevy_sprite/src/mesh2d/color_material.wgsl +++ b/crates/bevy_sprite/src/mesh2d/color_material.wgsl @@ -1,6 +1,7 @@ -#import bevy_sprite::mesh2d_types Mesh2d -#import bevy_sprite::mesh2d_vertex_output VertexOutput -#import bevy_sprite::mesh2d_view_bindings view +#import bevy_sprite::{ + mesh2d_vertex_output::VertexOutput, + mesh2d_view_bindings::view, +} #ifdef TONEMAP_IN_SHADER #import bevy_core_pipeline::tonemapping @@ -29,7 +30,7 @@ fn fragment( output_color = output_color * textureSample(texture, texture_sampler, mesh.uv); } #ifdef TONEMAP_IN_SHADER - output_color = bevy_core_pipeline::tonemapping::tone_mapping(output_color, view.color_grading); + output_color = tonemapping::tone_mapping(output_color, view.color_grading); #endif return output_color; } diff --git a/crates/bevy_sprite/src/mesh2d/material.rs b/crates/bevy_sprite/src/mesh2d/material.rs index 74f6073066b31..30593c875e208 100644 --- a/crates/bevy_sprite/src/mesh2d/material.rs +++ b/crates/bevy_sprite/src/mesh2d/material.rs @@ -462,7 +462,7 @@ pub struct Material2dBindGroupId(Option); /// Data prepared for a [`Material2d`] instance. 
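The call-site conversions in this patch all follow the same shape: `RenderDevice::create_bind_group` now takes a label, a layout, and a slice of entries, with `BindGroupEntries` building that slice. A hedged sketch with hypothetical names (`layout` is assumed to declare a texture at binding 0 and a sampler at binding 1):

```rust
use bevy::render::render_resource::{
    BindGroup, BindGroupEntries, BindGroupLayout, Sampler, TextureView,
};
use bevy::render::renderer::RenderDevice;

fn make_material_bind_group(
    render_device: &RenderDevice,
    layout: &BindGroupLayout,
    texture_view: &TextureView,
    sampler: &Sampler,
) -> BindGroup {
    // New three-argument form: label, layout, entries.
    // `sequential` assigns binding indices 0, 1, ... in tuple order.
    render_device.create_bind_group(
        "my_material_bind_group",
        layout,
        &BindGroupEntries::sequential((texture_view, sampler)),
    )
}
```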
pub struct PreparedMaterial2d { - pub bindings: Vec, + pub bindings: Vec<(u32, OwnedBindingResource)>, pub bind_group: BindGroup, pub key: T::Data, } diff --git a/crates/bevy_sprite/src/mesh2d/mesh.rs b/crates/bevy_sprite/src/mesh2d/mesh.rs index 5f62000bc441e..7aa212177dac3 100644 --- a/crates/bevy_sprite/src/mesh2d/mesh.rs +++ b/crates/bevy_sprite/src/mesh2d/mesh.rs @@ -8,7 +8,7 @@ use bevy_ecs::{ query::{QueryItem, ROQueryItem}, system::{lifetimeless::*, SystemParamItem, SystemState}, }; -use bevy_math::{Affine3, Vec2, Vec4}; +use bevy_math::{Affine3, Vec4}; use bevy_reflect::Reflect; use bevy_render::{ batching::{ @@ -297,7 +297,9 @@ impl FromWorld for Mesh2dPipeline { let texture = render_device.create_texture(&image.texture_descriptor); let sampler = match image.sampler_descriptor { ImageSampler::Default => (**default_sampler).clone(), - ImageSampler::Descriptor(descriptor) => render_device.create_sampler(&descriptor), + ImageSampler::Descriptor(ref descriptor) => { + render_device.create_sampler(descriptor) + } }; let format_size = image.texture_descriptor.format.pixel_size(); @@ -311,7 +313,7 @@ impl FromWorld for Mesh2dPipeline { &image.data, ImageDataLayout { offset: 0, - bytes_per_row: Some(image.texture_descriptor.size.width * format_size as u32), + bytes_per_row: Some(image.width() * format_size as u32), rows_per_image: None, }, image.texture_descriptor.size, @@ -323,10 +325,7 @@ impl FromWorld for Mesh2dPipeline { texture_view, texture_format: image.texture_descriptor.format, sampler, - size: Vec2::new( - image.texture_descriptor.size.width as f32, - image.texture_descriptor.size.height as f32, - ), + size: image.size_f32(), mip_level_count: image.texture_descriptor.mip_level_count, } }; @@ -596,14 +595,11 @@ pub fn prepare_mesh2d_bind_group( ) { if let Some(binding) = mesh2d_uniforms.binding() { commands.insert_resource(Mesh2dBindGroup { - value: render_device.create_bind_group(&BindGroupDescriptor { - entries: &[BindGroupEntry { - binding: 0, - resource: binding, - }], - label: Some("mesh2d_bind_group"), - layout: &mesh2d_pipeline.mesh_layout, - }), + value: render_device.create_bind_group( + "mesh2d_bind_group", + &mesh2d_pipeline.mesh_layout, + &BindGroupEntries::single(binding), + ), }); } } @@ -626,20 +622,11 @@ pub fn prepare_mesh2d_view_bind_groups( globals_buffer.buffer.binding(), ) { for entity in &views { - let view_bind_group = render_device.create_bind_group(&BindGroupDescriptor { - entries: &[ - BindGroupEntry { - binding: 0, - resource: view_binding.clone(), - }, - BindGroupEntry { - binding: 1, - resource: globals.clone(), - }, - ], - label: Some("mesh2d_view_bind_group"), - layout: &mesh2d_pipeline.view_layout, - }); + let view_bind_group = render_device.create_bind_group( + "mesh2d_view_bind_group", + &mesh2d_pipeline.view_layout, + &BindGroupEntries::sequential((view_binding.clone(), globals.clone())), + ); commands.entity(entity).insert(Mesh2dViewBindGroup { value: view_bind_group, diff --git a/crates/bevy_sprite/src/mesh2d/mesh2d.wgsl b/crates/bevy_sprite/src/mesh2d/mesh2d.wgsl index a3a8d118c199d..00c1ec8442ad2 100644 --- a/crates/bevy_sprite/src/mesh2d/mesh2d.wgsl +++ b/crates/bevy_sprite/src/mesh2d/mesh2d.wgsl @@ -1,7 +1,8 @@ -#import bevy_sprite::mesh2d_functions as mesh_functions -#import bevy_sprite::mesh2d_bindings mesh -#import bevy_sprite::mesh2d_vertex_output VertexOutput -#import bevy_sprite::mesh2d_view_bindings view +#import bevy_sprite::{ + mesh2d_functions as mesh_functions, + mesh2d_vertex_output::VertexOutput, + 
mesh2d_view_bindings::view, +} #ifdef TONEMAP_IN_SHADER #import bevy_core_pipeline::tonemapping @@ -66,7 +67,7 @@ fn fragment( #ifdef VERTEX_COLORS var color = in.color; #ifdef TONEMAP_IN_SHADER - color = bevy_core_pipeline::tonemapping::tone_mapping(color, view.color_grading); + color = tonemapping::tone_mapping(color, view.color_grading); #endif return color; #else diff --git a/crates/bevy_sprite/src/mesh2d/mesh2d_bindings.wgsl b/crates/bevy_sprite/src/mesh2d/mesh2d_bindings.wgsl index e673ef23f06b6..3c3ec0906efcc 100644 --- a/crates/bevy_sprite/src/mesh2d/mesh2d_bindings.wgsl +++ b/crates/bevy_sprite/src/mesh2d/mesh2d_bindings.wgsl @@ -1,6 +1,6 @@ #define_import_path bevy_sprite::mesh2d_bindings -#import bevy_sprite::mesh2d_types Mesh2d +#import bevy_sprite::mesh2d_types::Mesh2d #ifdef MESH_BINDGROUP_1 diff --git a/crates/bevy_sprite/src/mesh2d/mesh2d_functions.wgsl b/crates/bevy_sprite/src/mesh2d/mesh2d_functions.wgsl index b936cad10f66f..b2bc92a00b73c 100644 --- a/crates/bevy_sprite/src/mesh2d/mesh2d_functions.wgsl +++ b/crates/bevy_sprite/src/mesh2d/mesh2d_functions.wgsl @@ -1,9 +1,13 @@ #define_import_path bevy_sprite::mesh2d_functions -#import bevy_sprite::mesh2d_view_bindings view -#import bevy_sprite::mesh2d_bindings mesh -#import bevy_render::instance_index get_instance_index -#import bevy_render::maths affine_to_square, mat2x4_f32_to_mat3x3_unpack +#import bevy_sprite::{ + mesh2d_view_bindings::view, + mesh2d_bindings::mesh, +} +#import bevy_render::{ + instance_index::get_instance_index, + maths::{affine_to_square, mat2x4_f32_to_mat3x3_unpack}, +} fn get_model_matrix(instance_index: u32) -> mat4x4 { return affine_to_square(mesh[get_instance_index(instance_index)].model); diff --git a/crates/bevy_sprite/src/mesh2d/mesh2d_view_bindings.wgsl b/crates/bevy_sprite/src/mesh2d/mesh2d_view_bindings.wgsl index 55eb7b964c23a..8b2f57d6eaf94 100644 --- a/crates/bevy_sprite/src/mesh2d/mesh2d_view_bindings.wgsl +++ b/crates/bevy_sprite/src/mesh2d/mesh2d_view_bindings.wgsl @@ -1,7 +1,7 @@ #define_import_path bevy_sprite::mesh2d_view_bindings -#import bevy_render::view View -#import bevy_render::globals Globals +#import bevy_render::view::View +#import bevy_render::globals::Globals @group(0) @binding(0) var view: View; diff --git a/crates/bevy_sprite/src/render/mod.rs b/crates/bevy_sprite/src/render/mod.rs index c70d33286ccf8..7aa5ae0c147a6 100644 --- a/crates/bevy_sprite/src/render/mod.rs +++ b/crates/bevy_sprite/src/render/mod.rs @@ -21,7 +21,7 @@ use bevy_render::{ DrawFunctions, PhaseItem, RenderCommand, RenderCommandResult, RenderPhase, SetItemPipeline, TrackedRenderPass, }, - render_resource::*, + render_resource::{BindGroupEntries, *}, renderer::{RenderDevice, RenderQueue}, texture::{ BevyDefault, DefaultImageSampler, GpuImage, Image, ImageSampler, TextureFormatPixelInfo, @@ -93,7 +93,9 @@ impl FromWorld for SpritePipeline { let texture = render_device.create_texture(&image.texture_descriptor); let sampler = match image.sampler_descriptor { ImageSampler::Default => (**default_sampler).clone(), - ImageSampler::Descriptor(descriptor) => render_device.create_sampler(&descriptor), + ImageSampler::Descriptor(ref descriptor) => { + render_device.create_sampler(descriptor) + } }; let format_size = image.texture_descriptor.format.pixel_size(); @@ -107,7 +109,7 @@ impl FromWorld for SpritePipeline { &image.data, ImageDataLayout { offset: 0, - bytes_per_row: Some(image.texture_descriptor.size.width * format_size as u32), + bytes_per_row: Some(image.width() * format_size as u32), rows_per_image: 
None, }, image.texture_descriptor.size, @@ -118,10 +120,7 @@ impl FromWorld for SpritePipeline { texture_view, texture_format: image.texture_descriptor.format, sampler, - size: Vec2::new( - image.texture_descriptor.size.width as f32, - image.texture_descriptor.size.height as f32, - ), + size: image.size_f32(), mip_level_count: image.texture_descriptor.mip_level_count, } }; @@ -623,14 +622,11 @@ pub fn prepare_sprites( // Clear the sprite instances sprite_meta.sprite_instance_buffer.clear(); - sprite_meta.view_bind_group = Some(render_device.create_bind_group(&BindGroupDescriptor { - entries: &[BindGroupEntry { - binding: 0, - resource: view_binding, - }], - label: Some("sprite_view_bind_group"), - layout: &sprite_pipeline.view_layout, - })); + sprite_meta.view_bind_group = Some(render_device.create_bind_group( + "sprite_view_bind_group", + &sprite_pipeline.view_layout, + &BindGroupEntries::single(view_binding), + )); // Index buffer indices let mut index = 0; @@ -667,22 +663,14 @@ pub fn prepare_sprites( .values .entry(batch_image_handle) .or_insert_with(|| { - render_device.create_bind_group(&BindGroupDescriptor { - entries: &[ - BindGroupEntry { - binding: 0, - resource: BindingResource::TextureView( - &gpu_image.texture_view, - ), - }, - BindGroupEntry { - binding: 1, - resource: BindingResource::Sampler(&gpu_image.sampler), - }, - ], - label: Some("sprite_material_bind_group"), - layout: &sprite_pipeline.material_layout, - }) + render_device.create_bind_group( + "sprite_material_bind_group", + &sprite_pipeline.material_layout, + &BindGroupEntries::sequential(( + &gpu_image.texture_view, + &gpu_image.sampler, + )), + ) }); } diff --git a/crates/bevy_sprite/src/render/sprite.wgsl b/crates/bevy_sprite/src/render/sprite.wgsl index 536971e5cb6e6..1f5c0125e17ae 100644 --- a/crates/bevy_sprite/src/render/sprite.wgsl +++ b/crates/bevy_sprite/src/render/sprite.wgsl @@ -2,8 +2,10 @@ #import bevy_core_pipeline::tonemapping #endif -#import bevy_render::maths affine_to_square -#import bevy_render::view View +#import bevy_render::{ + maths::affine_to_square, + view::View, +} @group(0) @binding(0) var view: View; @@ -54,7 +56,7 @@ fn fragment(in: VertexOutput) -> @location(0) vec4 { var color = in.color * textureSample(sprite_texture, sprite_sampler, in.uv); #ifdef TONEMAP_IN_SHADER - color = bevy_core_pipeline::tonemapping::tone_mapping(color, view.color_grading); + color = tonemapping::tone_mapping(color, view.color_grading); #endif return color; diff --git a/crates/bevy_sprite/src/texture_atlas_builder.rs b/crates/bevy_sprite/src/texture_atlas_builder.rs index a4d55016afcc2..50ae821510b2b 100644 --- a/crates/bevy_sprite/src/texture_atlas_builder.rs +++ b/crates/bevy_sprite/src/texture_atlas_builder.rs @@ -105,7 +105,7 @@ impl TextureAtlasBuilder { let rect_height = (packed_location.height() - padding.y) as usize; let rect_x = packed_location.x() as usize; let rect_y = packed_location.y() as usize; - let atlas_width = atlas_texture.texture_descriptor.size.width as usize; + let atlas_width = atlas_texture.width() as usize; let format_size = atlas_texture.texture_descriptor.format.pixel_size(); for (texture_y, bound_y) in (rect_y..rect_y + rect_height).enumerate() { @@ -247,10 +247,7 @@ impl TextureAtlasBuilder { self.copy_converted_texture(&mut atlas_texture, texture, packed_location); } Ok(TextureAtlas { - size: Vec2::new( - atlas_texture.texture_descriptor.size.width as f32, - atlas_texture.texture_descriptor.size.height as f32, - ), + size: atlas_texture.size_f32(), texture: 
textures.add(atlas_texture), textures: texture_rects, texture_handles: Some(texture_ids), diff --git a/crates/bevy_tasks/Cargo.toml b/crates/bevy_tasks/Cargo.toml index c4607fbcf87ce..60f45cb3680e4 100644 --- a/crates/bevy_tasks/Cargo.toml +++ b/crates/bevy_tasks/Cargo.toml @@ -15,7 +15,7 @@ multi-threaded = [] futures-lite = "1.4.0" async-executor = "1.3.0" async-channel = "1.4.2" -async-io = { version = "1.13.0", optional = true } +async-io = { version = "2.0.0", optional = true } async-task = "4.2.0" concurrent-queue = "2.0.0" diff --git a/crates/bevy_tasks/README.md b/crates/bevy_tasks/README.md index 233d6794c7d32..1d1a7fb90465b 100644 --- a/crates/bevy_tasks/README.md +++ b/crates/bevy_tasks/README.md @@ -1,4 +1,10 @@ -# bevy_tasks +# Bevy Tasks + +[![License](https://img.shields.io/badge/license-MIT%2FApache-blue.svg)](https://github.com/bevyengine/bevy#license) +[![Crates.io](https://img.shields.io/crates/v/bevy.svg)](https://crates.io/crates/bevy_tasks) +[![Downloads](https://img.shields.io/crates/d/bevy_tasks.svg)](https://crates.io/crates/bevy_tasks) +[![Docs](https://docs.rs/bevy_tasks/badge.svg)](https://docs.rs/bevy_tasks/latest/bevy_tasks/) +[![Discord](https://img.shields.io/discord/691052431525675048.svg?label=&logo=discord&logoColor=ffffff&color=7389D8&labelColor=6A7EC2)](https://discord.gg/bevy) A refreshingly simple task executor for bevy. :) diff --git a/crates/bevy_tasks/src/single_threaded_task_pool.rs b/crates/bevy_tasks/src/single_threaded_task_pool.rs index 36e38df5a7970..9555a6a470f7c 100644 --- a/crates/bevy_tasks/src/single_threaded_task_pool.rs +++ b/crates/bevy_tasks/src/single_threaded_task_pool.rs @@ -12,7 +12,7 @@ pub struct TaskPoolBuilder {} /// This is a dummy struct for wasm support to provide the same api as with the multithreaded /// task pool. In the case of the multithreaded task pool this struct is used to spawn /// tasks on a specific thread. But the wasm task pool just calls -/// [`wasm_bindgen_futures::spawn_local`] for spawning which just runs tasks on the main thread +/// `wasm_bindgen_futures::spawn_local` for spawning which just runs tasks on the main thread /// and so the [`ThreadExecutor`] does nothing. #[derive(Default)] pub struct ThreadExecutor<'a>(PhantomData<&'a ()>); @@ -159,7 +159,7 @@ impl TaskPool { FakeTask } - /// Spawns a static future on the JS event loop. This is exactly the same as [`TaskSpool::spawn`]. + /// Spawns a static future on the JS event loop. This is exactly the same as [`TaskPool::spawn`]. pub fn spawn_local(&self, future: impl Future + 'static) -> FakeTask where T: 'static, diff --git a/crates/bevy_tasks/src/usages.rs b/crates/bevy_tasks/src/usages.rs index 49b8b5cd2ff72..fda3092b8ebc8 100644 --- a/crates/bevy_tasks/src/usages.rs +++ b/crates/bevy_tasks/src/usages.rs @@ -1,107 +1,77 @@ use super::TaskPool; use std::{ops::Deref, sync::OnceLock}; -static COMPUTE_TASK_POOL: OnceLock = OnceLock::new(); -static ASYNC_COMPUTE_TASK_POOL: OnceLock = OnceLock::new(); -static IO_TASK_POOL: OnceLock = OnceLock::new(); - -/// A newtype for a task pool for CPU-intensive work that must be completed to -/// deliver the next frame -/// -/// See [`TaskPool`] documentation for details on Bevy tasks. -/// [`AsyncComputeTaskPool`] should be preferred if the work does not have to be -/// completed before the next frame. -#[derive(Debug)] -pub struct ComputeTaskPool(TaskPool); - -impl ComputeTaskPool { - /// Initializes the global [`ComputeTaskPool`] instance. 
- pub fn init(f: impl FnOnce() -> TaskPool) -> &'static Self { - COMPUTE_TASK_POOL.get_or_init(|| Self(f())) - } - - /// Gets the global [`ComputeTaskPool`] instance. - /// - /// # Panics - /// Panics if no pool has been initialized yet. - pub fn get() -> &'static Self { - COMPUTE_TASK_POOL.get().expect( - "A ComputeTaskPool has not been initialized yet. Please call \ - ComputeTaskPool::init beforehand.", - ) - } +macro_rules! taskpool { + ($(#[$attr:meta])* ($static:ident, $type:ident)) => { + static $static: OnceLock<$type> = OnceLock::new(); + + $(#[$attr])* + #[derive(Debug)] + pub struct $type(TaskPool); + + impl $type { + #[doc = concat!(" Gets the global [`", stringify!($type), "`] instance, or initializes it with `f`.")] + pub fn get_or_init(f: impl FnOnce() -> TaskPool) -> &'static Self { + $static.get_or_init(|| Self(f())) + } + + #[doc = concat!(" Attempts to get the global [`", stringify!($type), "`] instance, \ + or returns `None` if it is not initialized.")] + pub fn try_get() -> Option<&'static Self> { + $static.get() + } + + #[doc = concat!(" Gets the global [`", stringify!($type), "`] instance.")] + #[doc = ""] + #[doc = " # Panics"] + #[doc = " Panics if the global instance has not been initialized yet."] + pub fn get() -> &'static Self { + $static.get().expect( + concat!( + "The ", + stringify!($type), + " has not been initialized yet. Please call ", + stringify!($type), + "::get_or_init beforehand." + ) + ) + } + } + + impl Deref for $type { + type Target = TaskPool; + + fn deref(&self) -> &Self::Target { + &self.0 + } + } + }; } -impl Deref for ComputeTaskPool { - type Target = TaskPool; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -/// A newtype for a task pool for CPU-intensive work that may span across multiple frames -/// -/// See [`TaskPool`] documentation for details on Bevy tasks. Use [`ComputeTaskPool`] if -/// the work must be complete before advancing to the next frame. -#[derive(Debug)] -pub struct AsyncComputeTaskPool(TaskPool); - -impl AsyncComputeTaskPool { - /// Initializes the global [`AsyncComputeTaskPool`] instance. - pub fn init(f: impl FnOnce() -> TaskPool) -> &'static Self { - ASYNC_COMPUTE_TASK_POOL.get_or_init(|| Self(f())) - } - - /// Gets the global [`AsyncComputeTaskPool`] instance. +taskpool! { + /// A newtype for a task pool for CPU-intensive work that must be completed to + /// deliver the next frame /// - /// # Panics - /// Panics if no pool has been initialized yet. - pub fn get() -> &'static Self { - ASYNC_COMPUTE_TASK_POOL.get().expect( - "A AsyncComputeTaskPool has not been initialized yet. Please call \ - AsyncComputeTaskPool::init beforehand.", - ) - } -} - -impl Deref for AsyncComputeTaskPool { - type Target = TaskPool; - - fn deref(&self) -> &Self::Target { - &self.0 - } + /// See [`TaskPool`] documentation for details on Bevy tasks. + /// [`AsyncComputeTaskPool`] should be preferred if the work does not have to be + /// completed before the next frame. + (COMPUTE_TASK_POOL, ComputeTaskPool) } -/// A newtype for a task pool for IO-intensive work (i.e. tasks that spend very little time in a -/// "woken" state) -#[derive(Debug)] -pub struct IoTaskPool(TaskPool); - -impl IoTaskPool { - /// Initializes the global [`IoTaskPool`] instance. - pub fn init(f: impl FnOnce() -> TaskPool) -> &'static Self { - IO_TASK_POOL.get_or_init(|| Self(f())) - } - - /// Gets the global [`IoTaskPool`] instance. +taskpool! 
{ + /// A newtype for a task pool for CPU-intensive work that may span across multiple frames /// - /// # Panics - /// Panics if no pool has been initialized yet. - pub fn get() -> &'static Self { - IO_TASK_POOL.get().expect( - "A IoTaskPool has not been initialized yet. Please call \ - IoTaskPool::init beforehand.", - ) - } + /// See [`TaskPool`] documentation for details on Bevy tasks. + /// Use [`ComputeTaskPool`] if the work must be complete before advancing to the next frame. + (ASYNC_COMPUTE_TASK_POOL, AsyncComputeTaskPool) } -impl Deref for IoTaskPool { - type Target = TaskPool; - - fn deref(&self) -> &Self::Target { - &self.0 - } +taskpool! { + /// A newtype for a task pool for IO-intensive work (i.e. tasks that spend very little time in a + /// "woken" state) + /// + /// See [`TaskPool`] documentation for details on Bevy tasks. + (IO_TASK_POOL, IoTaskPool) } /// A function used by `bevy_core` to tick the global tasks pools on the main thread. diff --git a/crates/bevy_text/src/font_atlas_set.rs b/crates/bevy_text/src/font_atlas_set.rs index 730d978c1477c..451db26bd50a1 100644 --- a/crates/bevy_text/src/font_atlas_set.rs +++ b/crates/bevy_text/src/font_atlas_set.rs @@ -107,7 +107,7 @@ impl FontAtlasSet { .texture_descriptor .size .height - .max(glyph_texture.texture_descriptor.size.width); + .max(glyph_texture.width()); // Pick the higher of 512 or the smallest power of 2 greater than glyph_max_size let containing = (1u32 << (32 - glyph_max_size.leading_zeros())).max(512) as f32; font_atlases.push(FontAtlas::new( diff --git a/crates/bevy_text/src/lib.rs b/crates/bevy_text/src/lib.rs index f5498d26ef1fb..024dc663f7cd8 100644 --- a/crates/bevy_text/src/lib.rs +++ b/crates/bevy_text/src/lib.rs @@ -26,9 +26,9 @@ pub mod prelude { } use bevy_app::prelude::*; +use bevy_asset::AssetApp; #[cfg(feature = "default_font")] -use bevy_asset::load_internal_binary_asset; -use bevy_asset::{AssetApp, Handle}; +use bevy_asset::{load_internal_binary_asset, Handle}; use bevy_ecs::prelude::*; use bevy_render::{camera::CameraUpdateSystem, ExtractSchedule, RenderApp}; use bevy_sprite::SpriteSystem; diff --git a/crates/bevy_time/src/common_conditions.rs b/crates/bevy_time/src/common_conditions.rs index 974de24dfae2c..1e4fbc8d5a16a 100644 --- a/crates/bevy_time/src/common_conditions.rs +++ b/crates/bevy_time/src/common_conditions.rs @@ -1,11 +1,9 @@ -use crate::{fixed_timestep::FixedTime, Time, Timer, TimerMode}; +use crate::{Real, Time, Timer, TimerMode}; use bevy_ecs::system::Res; use bevy_utils::Duration; /// Run condition that is active on a regular time interval, using [`Time`] to advance -/// the timer. -/// -/// If used for a fixed timestep system, use [`on_fixed_timer`] instead. +/// the timer. The timer ticks at the rate of [`Time::relative_speed`]. /// /// ```rust,no_run /// # use bevy_app::{App, NoopPluginGroup as DefaultPlugins, PluginGroup, Update}; @@ -40,22 +38,18 @@ pub fn on_timer(duration: Duration) -> impl FnMut(Res