From 9290674060c8bdf1b2ec34e3d3859a93a60e5f7d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois?= Date: Sat, 14 Oct 2023 00:40:28 +0200 Subject: [PATCH 01/63] GLTF loader: handle warning NODE_SKINNED_MESH_WITHOUT_SKIN (#9360) # Objective - According to the GLTF spec, it should not be possible to have a non skinned mesh on a skinned node > When the node contains skin, all mesh.primitives MUST contain JOINTS_0 and WEIGHTS_0 attributes > https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html#reference-node - However, the reverse (a skinned mesh on a non skinned node) is just a warning, see `NODE_SKINNED_MESH_WITHOUT_SKIN` in https://github.com/KhronosGroup/glTF-Validator/blob/main/ISSUES.md#linkerror - This causes a crash in Bevy because the bind group layout is made from the mesh which is skinned, but filled from the entity which is not ``` thread '' panicked at 'wgpu error: Validation Error Caused by: In a RenderPass note: encoder = `` In a set_bind_group command note: bind group = `` Bind group 2 expects 2 dynamic offsets. However 1 dynamic offset were provided. ``` - Blender can export GLTF files with this kind of issues ## Solution - When a skinned mesh is only used on non skinned nodes, ignore skinned information from the mesh and warn the user (this is what three.js is doing) - When a skinned mesh is used on both skinned and non skinned nodes, log an error --- crates/bevy_gltf/src/loader.rs | 27 +++++++++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git a/crates/bevy_gltf/src/loader.rs b/crates/bevy_gltf/src/loader.rs index 842290e2d4fcd..87fb8c6318b44 100644 --- a/crates/bevy_gltf/src/loader.rs +++ b/crates/bevy_gltf/src/loader.rs @@ -6,7 +6,7 @@ use bevy_core::Name; use bevy_core_pipeline::prelude::Camera3dBundle; use bevy_ecs::{entity::Entity, world::World}; use bevy_hierarchy::{BuildWorldChildren, WorldChildBuilder}; -use bevy_log::warn; +use bevy_log::{error, warn}; use bevy_math::{Mat4, Vec3}; use bevy_pbr::{ AlphaMode, DirectionalLight, DirectionalLightBundle, PbrBundle, PointLight, PointLightBundle, @@ -36,7 +36,7 @@ use gltf::{ accessor::Iter, mesh::{util::ReadIndices, Mode}, texture::{MagFilter, MinFilter, WrappingMode}, - Material, Node, Primitive, + Material, Node, Primitive, Semantic, }; use serde::Deserialize; use std::{ @@ -329,6 +329,17 @@ async fn load_gltf<'a, 'b, 'c>( let mut meshes = vec![]; let mut named_meshes = HashMap::default(); + let mut meshes_on_skinned_nodes = HashSet::default(); + let mut meshes_on_non_skinned_nodes = HashSet::default(); + for gltf_node in gltf.nodes() { + if gltf_node.skin().is_some() { + if let Some(mesh) = gltf_node.mesh() { + meshes_on_skinned_nodes.insert(mesh.index()); + } + } else if let Some(mesh) = gltf_node.mesh() { + meshes_on_non_skinned_nodes.insert(mesh.index()); + } + } for gltf_mesh in gltf.meshes() { let mut primitives = vec![]; for primitive in gltf_mesh.primitives() { @@ -339,6 +350,18 @@ async fn load_gltf<'a, 'b, 'c>( // Read vertex attributes for (semantic, accessor) in primitive.attributes() { + if [Semantic::Joints(0), Semantic::Weights(0)].contains(&semantic) { + if !meshes_on_skinned_nodes.contains(&gltf_mesh.index()) { + warn!( + "Ignoring attribute {:?} for skinned mesh {:?} used on non skinned nodes (NODE_SKINNED_MESH_WITHOUT_SKIN)", + semantic, + primitive_label + ); + continue; + } else if meshes_on_non_skinned_nodes.contains(&gltf_mesh.index()) { + error!("Skinned mesh {:?} used on both skinned and non skin nodes, this is likely to cause an error 
(NODE_SKINNED_MESH_WITHOUT_SKIN)", primitive_label); + } + } match convert_attribute( semantic, accessor, From 35073cf7aa9eaf748e167c103921a6764fcedfb1 Mon Sep 17 00:00:00 2001 From: Carter Anderson Date: Fri, 13 Oct 2023 16:17:32 -0700 Subject: [PATCH 02/63] Multiple Asset Sources (#9885) This adds support for **Multiple Asset Sources**. You can now register a named `AssetSource`, which you can load assets from like you normally would: ```rust let shader: Handle = asset_server.load("custom_source://path/to/shader.wgsl"); ``` Notice that `AssetPath` now supports `some_source://` syntax. This can now be accessed through the `asset_path.source()` accessor. Asset source names _are not required_. If one is not specified, the default asset source will be used: ```rust let shader: Handle = asset_server.load("path/to/shader.wgsl"); ``` The behavior of the default asset source has not changed. Ex: the `assets` folder is still the default. As referenced in #9714 ## Why? **Multiple Asset Sources** enables a number of often-asked-for scenarios: * **Loading some assets from other locations on disk**: you could create a `config` asset source that reads from the OS-default config folder (not implemented in this PR) * **Loading some assets from a remote server**: you could register a new `remote` asset source that reads some assets from a remote http server (not implemented in this PR) * **Improved "Binary Embedded" Assets**: we can use this system for "embedded-in-binary assets", which allows us to replace the old `load_internal_asset!` approach, which couldn't support asset processing, didn't support hot-reloading _well_, and didn't make embedded assets accessible to the `AssetServer` (implemented in this pr) ## Adding New Asset Sources An `AssetSource` is "just" a collection of `AssetReader`, `AssetWriter`, and `AssetWatcher` entries. You can configure new asset sources like this: ```rust app.register_asset_source( "other", AssetSource::build() .with_reader(|| Box::new(FileAssetReader::new("other"))) ) ) ``` Note that `AssetSource` construction _must_ be repeatable, which is why a closure is accepted. `AssetSourceBuilder` supports `with_reader`, `with_writer`, `with_watcher`, `with_processed_reader`, `with_processed_writer`, and `with_processed_watcher`. Note that the "asset source" system replaces the old "asset providers" system. ## Processing Multiple Sources The `AssetProcessor` now supports multiple asset sources! Processed assets can refer to assets in other sources and everything "just works". Each `AssetSource` defines an unprocessed and processed `AssetReader` / `AssetWriter`. Currently this is all or nothing for a given `AssetSource`. A given source is either processed or it is not. Later we might want to add support for "lazy asset processing", where an `AssetSource` (such as a remote server) can be configured to only process assets that are directly referenced by local assets (in order to save local disk space and avoid doing extra work). ## A new `AssetSource`: `embedded` One of the big features motivating **Multiple Asset Sources** was improving our "embedded-in-binary" asset loading. To prove out the **Multiple Asset Sources** implementation, I chose to build a new `embedded` `AssetSource`, which replaces the old `load_interal_asset!` system. The old `load_internal_asset!` approach had a number of issues: * The `AssetServer` was not aware of (or capable of loading) internal assets. 
* Because internal assets weren't visible to the `AssetServer`, they could not be processed (or used by assets that are processed). This would prevent things "preprocessing shaders that depend on built in Bevy shaders", which is something we desperately need to start doing. * Each "internal asset" needed a UUID to be defined in-code to reference it. This was very manual and toilsome. The new `embedded` `AssetSource` enables the following pattern: ```rust // Called in `crates/bevy_pbr/src/render/mesh.rs` embedded_asset!(app, "mesh.wgsl"); // later in the app let shader: Handle = asset_server.load("embedded://bevy_pbr/render/mesh.wgsl"); ``` Notice that this always treats the crate name as the "root path", and it trims out the `src` path for brevity. This is generally predictable, but if you need to debug you can use the new `embedded_path!` macro to get a `PathBuf` that matches the one used by `embedded_asset`. You can also reference embedded assets in arbitrary assets, such as WGSL shaders: ```rust #import "embedded://bevy_pbr/render/mesh.wgsl" ``` This also makes `embedded` assets go through the "normal" asset lifecycle. They are only loaded when they are actually used! We are also discussing implicitly converting asset paths to/from shader modules, so in the future (not in this PR) you might be able to load it like this: ```rust #import bevy_pbr::render::mesh::Vertex ``` Compare that to the old system! ```rust pub const MESH_SHADER_HANDLE: Handle = Handle::weak_from_u128(3252377289100772450); load_internal_asset!(app, MESH_SHADER_HANDLE, "mesh.wgsl", Shader::from_wgsl); // The mesh asset is the _only_ accessible via MESH_SHADER_HANDLE and _cannot_ be loaded via the AssetServer. ``` ## Hot Reloading `embedded` You can enable `embedded` hot reloading by enabling the `embedded_watcher` cargo feature: ``` cargo run --features=embedded_watcher ``` ## Improved Hot Reloading Workflow First: the `filesystem_watcher` cargo feature has been renamed to `file_watcher` for brevity (and to match the `FileAssetReader` naming convention). More importantly, hot asset reloading is no longer configured in-code by default. If you enable any asset watcher feature (such as `file_watcher` or `rust_source_watcher`), asset watching will be automatically enabled. This removes the need to _also_ enable hot reloading in your app code. That means you can replace this: ```rust app.add_plugins(DefaultPlugins.set(AssetPlugin::default().watch_for_changes())) ``` with this: ```rust app.add_plugins(DefaultPlugins) ``` If you want to hot reload assets in your app during development, just run your app like this: ``` cargo run --features=file_watcher ``` This means you can use the same code for development and deployment! To deploy an app, just don't include the watcher feature ``` cargo build --release ``` My intent is to move to this approach for pretty much all dev workflows. In a future PR I would like to replace `AssetMode::ProcessedDev` with a `runtime-processor` cargo feature. We could then group all common "dev" cargo features under a single `dev` feature: ```sh # this would enable file_watcher, embedded_watcher, runtime-processor, and more cargo run --features=dev ``` ## AssetMode `AssetPlugin::Unprocessed`, `AssetPlugin::Processed`, and `AssetPlugin::ProcessedDev` have been replaced with an `AssetMode` field on `AssetPlugin`. 
```rust // before app.add_plugins(DefaultPlugins.set(AssetPlugin::Processed { /* fields here */ }) // after app.add_plugins(DefaultPlugins.set(AssetPlugin { mode: AssetMode::Processed, ..default() }) ``` This aligns `AssetPlugin` with our other struct-like plugins. The old "source" and "destination" `AssetProvider` fields in the enum variants have been replaced by the "asset source" system. You no longer need to configure the AssetPlugin to "point" to custom asset providers. ## AssetServerMode To improve the implementation of **Multiple Asset Sources**, `AssetServer` was made aware of whether or not it is using "processed" or "unprocessed" assets. You can check that like this: ```rust if asset_server.mode() == AssetServerMode::Processed { /* do something */ } ``` Note that this refactor should also prepare the way for building "one to many processed output files", as it makes the server aware of whether it is loading from processed or unprocessed sources. Meaning we can store and read processed and unprocessed assets differently! ## AssetPath can now refer to folders The "file only" restriction has been removed from `AssetPath`. The `AssetServer::load_folder` API now accepts an `AssetPath` instead of a `Path`, meaning you can load folders from other asset sources! ## Improved AssetPath Parsing AssetPath parsing was reworked to support sources, improve error messages, and to enable parsing with a single pass over the string. `AssetPath::new` was replaced by `AssetPath::parse` and `AssetPath::try_parse`. ## AssetWatcher broken out from AssetReader `AssetReader` is no longer responsible for constructing `AssetWatcher`. This has been moved to `AssetSourceBuilder`. ## Duplicate Event Debouncing Asset V2 already debounced duplicate filesystem events, but this was _input_ events. Multiple input event types can produce the same _output_ `AssetSourceEvent`. Now that we have `embedded_watcher`, which does expensive file io on events, it made sense to debounce output events too, so I added that! This will also benefit the AssetProcessor by preventing integrity checks for duplicate events (and helps keep the noise down in trace logs). ## Next Steps * **Port Built-in Shaders**: Currently the primary (and essentially only) user of `load_interal_asset` in Bevy's source code is "built-in shaders". I chose not to do that in this PR for a few reasons: 1. We need to add the ability to pass shader defs in to shaders via meta files. Some shaders (such as MESH_VIEW_TYPES) need to pass shader def values in that are defined in code. 2. We need to revisit the current shader module naming system. I think we _probably_ want to imply modules from source structure (at least by default). Ideally in a way that can losslessly convert asset paths to/from shader modules (to enable the asset system to resolve modules using the asset server). 3. I want to keep this change set minimal / get this merged first. * **Deprecate `load_internal_asset`**: we can't do that until we do (1) and (2) * **Relative Asset Paths**: This PR significantly increases the need for relative asset paths (which was already pretty high). Currently when loading dependencies, it is assumed to be an absolute path, which means if in an `AssetLoader` you call `context.load("some/path/image.png")` it will assume that is the "default" asset source, _even if the current asset is in a different asset source_. This will cause breakage for AssetLoaders that are not designed to add the current source to whatever paths are being used. 
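As a rough sketch of that breakage (this is not code from the PR; the `remote` source name and the file paths are made up for illustration), a loader today has to rebase a bare dependency path onto its own source by hand, using the `AssetPath` APIs this PR introduces (`AssetPath::parse`, `source()`, `with_source`):

```rust
use bevy_asset::{io::AssetSourceId, AssetPath};

fn main() {
    // A bare dependency path parses to the *default* asset source...
    let dependency = AssetPath::parse("textures/rock.png");
    assert_eq!(dependency.source(), &AssetSourceId::Default);

    // ...even when the asset that referenced it was loaded from another source.
    let current_asset = AssetPath::parse("remote://models/rock.gltf");

    // So a source-aware loader currently has to re-attach its own source manually:
    let rebased = dependency.with_source(current_asset.source().clone());
    assert_eq!(rebased.source(), current_asset.source());
}
```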
AssetLoaders should generally not need to be aware of the name of their current asset source, or need to think about the "current asset source" generally. We should build apis that support relative asset paths and then encourage using relative paths as much as possible (both via api design and docs). Relative paths are also important because they will allow developers to move folders around (even across providers) without reprocessing, provided there is no path breakage. --- Cargo.toml | 8 +- crates/bevy_asset/Cargo.toml | 4 +- crates/bevy_asset/src/io/android.rs | 7 - .../src/io/embedded/embedded_watcher.rs | 88 +++ crates/bevy_asset/src/io/embedded/mod.rs | 252 ++++++++ crates/bevy_asset/src/io/file/file_watcher.rs | 342 +++++++---- crates/bevy_asset/src/io/file/mod.rs | 25 +- crates/bevy_asset/src/io/gated.rs | 7 - crates/bevy_asset/src/io/memory.rs | 99 ++- crates/bevy_asset/src/io/mod.rs | 17 +- crates/bevy_asset/src/io/processor_gated.rs | 60 +- crates/bevy_asset/src/io/provider.rs | 190 ------ crates/bevy_asset/src/io/source.rs | 553 +++++++++++++++++ crates/bevy_asset/src/io/wasm.rs | 10 +- crates/bevy_asset/src/lib.rs | 283 ++++----- crates/bevy_asset/src/loader.rs | 34 +- crates/bevy_asset/src/path.rs | 217 ++++++- crates/bevy_asset/src/processor/log.rs | 47 +- crates/bevy_asset/src/processor/mod.rs | 568 ++++++++++-------- crates/bevy_asset/src/processor/process.rs | 34 +- crates/bevy_asset/src/server/mod.rs | 183 ++++-- crates/bevy_gltf/src/loader.rs | 2 +- crates/bevy_internal/Cargo.toml | 5 +- .../src/single_threaded_task_pool.rs | 4 +- crates/bevy_utils/src/cow_arc.rs | 10 +- docs/cargo_features.md | 3 +- examples/asset/custom_asset_reader.rs | 28 +- examples/asset/hot_asset_reloading.rs | 5 +- examples/asset/processing/e.txt | 1 + examples/asset/processing/processing.rs | 31 +- examples/scene/scene.rs | 5 +- examples/shader/post_processing.rs | 5 +- examples/tools/scene_viewer/main.rs | 11 +- 33 files changed, 2109 insertions(+), 1029 deletions(-) create mode 100644 crates/bevy_asset/src/io/embedded/embedded_watcher.rs create mode 100644 crates/bevy_asset/src/io/embedded/mod.rs delete mode 100644 crates/bevy_asset/src/io/provider.rs create mode 100644 crates/bevy_asset/src/io/source.rs create mode 100644 examples/asset/processing/e.txt diff --git a/Cargo.toml b/Cargo.toml index 51cb064ffe6cb..5202faf374c66 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -245,7 +245,10 @@ shader_format_spirv = ["bevy_internal/shader_format_spirv"] webgl2 = ["bevy_internal/webgl"] # Enables watching the filesystem for Bevy Asset hot-reloading -filesystem_watcher = ["bevy_internal/filesystem_watcher"] +file_watcher = ["bevy_internal/file_watcher"] + +# Enables watching in memory asset providers for Bevy Asset hot-reloading +embedded_watcher = ["bevy_internal/embedded_watcher"] [dependencies] bevy_dylib = { path = "crates/bevy_dylib", version = "0.12.0-dev", default-features = false, optional = true } @@ -1065,6 +1068,7 @@ wasm = true name = "hot_asset_reloading" path = "examples/asset/hot_asset_reloading.rs" doc-scrape-examples = true +required-features = ["file_watcher"] [package.metadata.example.hot_asset_reloading] name = "Hot Reloading of Assets" @@ -1076,7 +1080,7 @@ wasm = true name = "asset_processing" path = "examples/asset/processing/processing.rs" doc-scrape-examples = true -required-features = ["filesystem_watcher"] +required-features = ["file_watcher"] [package.metadata.example.asset_processing] name = "Asset Processing" diff --git a/crates/bevy_asset/Cargo.toml 
b/crates/bevy_asset/Cargo.toml index c2877844f94fa..52717f627e936 100644 --- a/crates/bevy_asset/Cargo.toml +++ b/crates/bevy_asset/Cargo.toml @@ -11,8 +11,10 @@ keywords = ["bevy"] # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [features] -filesystem_watcher = ["notify-debouncer-full"] +file_watcher = ["notify-debouncer-full", "watch"] +embedded_watcher = ["file_watcher"] multi-threaded = ["bevy_tasks/multi-threaded"] +watch = [] [dependencies] bevy_app = { path = "../bevy_app", version = "0.12.0-dev" } diff --git a/crates/bevy_asset/src/io/android.rs b/crates/bevy_asset/src/io/android.rs index a07043c4dd4bc..267f78680d65e 100644 --- a/crates/bevy_asset/src/io/android.rs +++ b/crates/bevy_asset/src/io/android.rs @@ -71,11 +71,4 @@ impl AssetReader for AndroidAssetReader { error!("Reading directories is not supported with the AndroidAssetReader"); Box::pin(async move { Ok(false) }) } - - fn watch_for_changes( - &self, - _event_sender: crossbeam_channel::Sender, - ) -> Option> { - None - } } diff --git a/crates/bevy_asset/src/io/embedded/embedded_watcher.rs b/crates/bevy_asset/src/io/embedded/embedded_watcher.rs new file mode 100644 index 0000000000000..6e92caa5d3bb3 --- /dev/null +++ b/crates/bevy_asset/src/io/embedded/embedded_watcher.rs @@ -0,0 +1,88 @@ +use crate::io::{ + file::{get_asset_path, get_base_path, new_asset_event_debouncer, FilesystemEventHandler}, + memory::Dir, + AssetSourceEvent, AssetWatcher, +}; +use bevy_log::warn; +use bevy_utils::{Duration, HashMap}; +use notify_debouncer_full::{notify::RecommendedWatcher, Debouncer, FileIdMap}; +use parking_lot::RwLock; +use std::{ + fs::File, + io::{BufReader, Read}, + path::{Path, PathBuf}, + sync::Arc, +}; + +/// A watcher for assets stored in the `embedded` asset source. Embedded assets are assets whose +/// bytes have been embedded into the Rust binary using the [`embedded_asset`](crate::embedded_asset) macro. +/// This watcher will watch for changes to the "source files", read the contents of changed files from the file system +/// and overwrite the initial static bytes of the file embedded in the binary with the new dynamically loaded bytes. +pub struct EmbeddedWatcher { + _watcher: Debouncer, +} + +impl EmbeddedWatcher { + pub fn new( + dir: Dir, + root_paths: Arc>>, + sender: crossbeam_channel::Sender, + debounce_wait_time: Duration, + ) -> Self { + let root = get_base_path(); + let handler = EmbeddedEventHandler { + dir, + root: root.clone(), + sender, + root_paths, + last_event: None, + }; + let watcher = new_asset_event_debouncer(root, debounce_wait_time, handler).unwrap(); + Self { _watcher: watcher } + } +} + +impl AssetWatcher for EmbeddedWatcher {} + +/// A [`FilesystemEventHandler`] that uses [`EmbeddedAssetRegistry`](crate::io::embedded::EmbeddedAssetRegistry) to hot-reload +/// binary-embedded Rust source files. This will read the contents of changed files from the file system and overwrite +/// the initial static bytes from the file embedded in the binary. 
+pub(crate) struct EmbeddedEventHandler { + sender: crossbeam_channel::Sender, + root_paths: Arc>>, + root: PathBuf, + dir: Dir, + last_event: Option, +} +impl FilesystemEventHandler for EmbeddedEventHandler { + fn begin(&mut self) { + self.last_event = None; + } + + fn get_path(&self, absolute_path: &Path) -> Option<(PathBuf, bool)> { + let (local_path, is_meta) = get_asset_path(&self.root, absolute_path); + let final_path = self.root_paths.read().get(&local_path)?.clone(); + if is_meta { + warn!("Meta file asset hot-reloading is not supported yet: {final_path:?}"); + } + Some((final_path, false)) + } + + fn handle(&mut self, absolute_paths: &[PathBuf], event: AssetSourceEvent) { + if self.last_event.as_ref() != Some(&event) { + if let AssetSourceEvent::ModifiedAsset(path) = &event { + if let Ok(file) = File::open(&absolute_paths[0]) { + let mut reader = BufReader::new(file); + let mut buffer = Vec::new(); + + // Read file into vector. + if reader.read_to_end(&mut buffer).is_ok() { + self.dir.insert_asset(path, buffer); + } + } + } + self.last_event = Some(event.clone()); + self.sender.send(event).unwrap(); + } + } +} diff --git a/crates/bevy_asset/src/io/embedded/mod.rs b/crates/bevy_asset/src/io/embedded/mod.rs new file mode 100644 index 0000000000000..e5470cd3d5c3f --- /dev/null +++ b/crates/bevy_asset/src/io/embedded/mod.rs @@ -0,0 +1,252 @@ +#[cfg(feature = "embedded_watcher")] +mod embedded_watcher; + +#[cfg(feature = "embedded_watcher")] +pub use embedded_watcher::*; + +use crate::io::{ + memory::{Dir, MemoryAssetReader, Value}, + AssetSource, AssetSourceBuilders, +}; +use bevy_ecs::system::Resource; +use std::path::{Path, PathBuf}; + +pub const EMBEDDED: &str = "embedded"; + +/// A [`Resource`] that manages "rust source files" in a virtual in memory [`Dir`], which is intended +/// to be shared with a [`MemoryAssetReader`]. +/// Generally this should not be interacted with directly. The [`embedded_asset`] will populate this. +/// +/// [`embedded_asset`]: crate::embedded_asset +#[derive(Resource, Default)] +pub struct EmbeddedAssetRegistry { + dir: Dir, + #[cfg(feature = "embedded_watcher")] + root_paths: std::sync::Arc< + parking_lot::RwLock>, + >, +} + +impl EmbeddedAssetRegistry { + /// Inserts a new asset. `full_path` is the full path (as [`file`] would return for that file, if it was capable of + /// running in a non-rust file). `asset_path` is the path that will be used to identify the asset in the `embedded` + /// [`AssetSource`]. `value` is the bytes that will be returned for the asset. This can be _either_ a `&'static [u8]` + /// or a [`Vec`]. + #[allow(unused)] + pub fn insert_asset(&self, full_path: PathBuf, asset_path: &Path, value: impl Into) { + #[cfg(feature = "embedded_watcher")] + self.root_paths + .write() + .insert(full_path.to_owned(), asset_path.to_owned()); + self.dir.insert_asset(asset_path, value); + } + + /// Inserts new asset metadata. `full_path` is the full path (as [`file`] would return for that file, if it was capable of + /// running in a non-rust file). `asset_path` is the path that will be used to identify the asset in the `embedded` + /// [`AssetSource`]. `value` is the bytes that will be returned for the asset. This can be _either_ a `&'static [u8]` + /// or a [`Vec`]. 
+ #[allow(unused)] + pub fn insert_meta(&self, full_path: &Path, asset_path: &Path, value: impl Into) { + #[cfg(feature = "embedded_watcher")] + self.root_paths + .write() + .insert(full_path.to_owned(), asset_path.to_owned()); + self.dir.insert_meta(asset_path, value); + } + + /// Registers a `embedded` [`AssetSource`] that uses this [`EmbeddedAssetRegistry`]. + // NOTE: unused_mut because embedded_watcher feature is the only mutable consumer of `let mut source` + #[allow(unused_mut)] + pub fn register_source(&self, sources: &mut AssetSourceBuilders) { + let dir = self.dir.clone(); + let processed_dir = self.dir.clone(); + let mut source = AssetSource::build() + .with_reader(move || Box::new(MemoryAssetReader { root: dir.clone() })) + .with_processed_reader(move || { + Box::new(MemoryAssetReader { + root: processed_dir.clone(), + }) + }); + + #[cfg(feature = "embedded_watcher")] + { + let root_paths = self.root_paths.clone(); + let dir = self.dir.clone(); + let processed_root_paths = self.root_paths.clone(); + let processd_dir = self.dir.clone(); + source = source + .with_watcher(move |sender| { + Some(Box::new(EmbeddedWatcher::new( + dir.clone(), + root_paths.clone(), + sender, + std::time::Duration::from_millis(300), + ))) + }) + .with_processed_watcher(move |sender| { + Some(Box::new(EmbeddedWatcher::new( + processd_dir.clone(), + processed_root_paths.clone(), + sender, + std::time::Duration::from_millis(300), + ))) + }); + } + sources.insert(EMBEDDED, source); + } +} + +/// Returns the [`Path`] for a given `embedded` asset. +/// This is used internally by [`embedded_asset`] and can be used to get a [`Path`] +/// that matches the [`AssetPath`](crate::AssetPath) used by that asset. +/// +/// [`embedded_asset`]: crate::embedded_asset +#[macro_export] +macro_rules! embedded_path { + ($path_str: expr) => {{ + embedded_path!("/src/", $path_str) + }}; + + ($source_path: expr, $path_str: expr) => {{ + let crate_name = module_path!().split(':').next().unwrap(); + let after_src = file!().split($source_path).nth(1).unwrap(); + let file_path = std::path::Path::new(after_src) + .parent() + .unwrap() + .join($path_str); + std::path::Path::new(crate_name).join(file_path) + }}; +} + +/// Creates a new `embedded` asset by embedding the bytes of the given path into the current binary +/// and registering those bytes with the `embedded` [`AssetSource`]. +/// +/// This accepts the current [`App`](bevy_app::App) as the first parameter and a path `&str` (relative to the current file) as the second. +/// +/// By default this will generate an [`AssetPath`] using the following rules: +/// +/// 1. Search for the first `$crate_name/src/` in the path and trim to the path past that point. +/// 2. Re-add the current `$crate_name` to the front of the path +/// +/// For example, consider the following file structure in the theoretical `bevy_rock` crate, which provides a Bevy [`Plugin`](bevy_app::Plugin) +/// that renders fancy rocks for scenes. +/// +/// * `bevy_rock` +/// * `src` +/// * `render` +/// * `rock.wgsl` +/// * `mod.rs` +/// * `lib.rs` +/// * `Cargo.toml` +/// +/// `rock.wgsl` is a WGSL shader asset that the `bevy_rock` plugin author wants to bundle with their crate. 
They invoke the following +/// in `bevy_rock/src/render/mod.rs`: +/// +/// `embedded_asset!(app, "rock.wgsl")` +/// +/// `rock.wgsl` can now be loaded by the [`AssetServer`](crate::AssetServer) with the following path: +/// +/// ```no_run +/// # use bevy_asset::{Asset, AssetServer}; +/// # use bevy_reflect::TypePath; +/// # let asset_server: AssetServer = panic!(); +/// #[derive(Asset, TypePath)] +/// # struct Shader; +/// let shader = asset_server.load::("embedded://bevy_rock/render/rock.wgsl"); +/// ``` +/// +/// Some things to note in the path: +/// 1. The non-default `embedded:://` [`AssetSource`] +/// 2. `src` is trimmed from the path +/// +/// The default behavior also works for cargo workspaces. Pretend the `bevy_rock` crate now exists in a larger workspace in +/// `$SOME_WORKSPACE/crates/bevy_rock`. The asset path would remain the same, because [`embedded_asset`] searches for the +/// _first instance_ of `bevy_rock/src` in the path. +/// +/// For most "standard crate structures" the default works just fine. But for some niche cases (such as cargo examples), +/// the `src` path will not be present. You can override this behavior by adding it as the second argument to [`embedded_asset`]: +/// +/// `embedded_asset!(app, "/examples/rock_stuff/", "rock.wgsl")` +/// +/// When there are three arguments, the second argument will replace the default `/src/` value. Note that these two are +/// equivalent: +/// +/// `embedded_asset!(app, "rock.wgsl")` +/// `embedded_asset!(app, "/src/", "rock.wgsl")` +/// +/// This macro uses the [`include_bytes`] macro internally and _will not_ reallocate the bytes. +/// Generally the [`AssetPath`] generated will be predictable, but if your asset isn't +/// available for some reason, you can use the [`embedded_path`] macro to debug. +/// +/// Hot-reloading `embedded` assets is supported. Just enable the `embedded_watcher` cargo feature. +/// +/// [`AssetPath`]: crate::AssetPath +/// [`embedded_asset`]: crate::embedded_asset +/// [`embedded_path`]: crate::embedded_path +#[macro_export] +macro_rules! embedded_asset { + ($app: ident, $path: expr) => {{ + embedded_asset!($app, "/src/", $path) + }}; + + ($app: ident, $source_path: expr, $path: expr) => {{ + let mut embedded = $app + .world + .resource_mut::<$crate::io::embedded::EmbeddedAssetRegistry>(); + let path = $crate::embedded_path!($source_path, $path); + #[cfg(feature = "embedded_watcher")] + let full_path = std::path::Path::new(file!()).parent().unwrap().join($path); + #[cfg(not(feature = "embedded_watcher"))] + let full_path = std::path::PathBuf::new(); + embedded.insert_asset(full_path, &path, include_bytes!($path)); + }}; +} + +/// Loads an "internal" asset by embedding the string stored in the given `path_str` and associates it with the given handle. +#[macro_export] +macro_rules! 
load_internal_asset { + ($app: ident, $handle: expr, $path_str: expr, $loader: expr) => {{ + let mut assets = $app.world.resource_mut::<$crate::Assets<_>>(); + assets.insert($handle, ($loader)( + include_str!($path_str), + std::path::Path::new(file!()) + .parent() + .unwrap() + .join($path_str) + .to_string_lossy() + )); + }}; + // we can't support params without variadic arguments, so internal assets with additional params can't be hot-reloaded + ($app: ident, $handle: ident, $path_str: expr, $loader: expr $(, $param:expr)+) => {{ + let mut assets = $app.world.resource_mut::<$crate::Assets<_>>(); + assets.insert($handle, ($loader)( + include_str!($path_str), + std::path::Path::new(file!()) + .parent() + .unwrap() + .join($path_str) + .to_string_lossy(), + $($param),+ + )); + }}; +} + +/// Loads an "internal" binary asset by embedding the bytes stored in the given `path_str` and associates it with the given handle. +#[macro_export] +macro_rules! load_internal_binary_asset { + ($app: ident, $handle: expr, $path_str: expr, $loader: expr) => {{ + let mut assets = $app.world.resource_mut::<$crate::Assets<_>>(); + assets.insert( + $handle, + ($loader)( + include_bytes!($path_str).as_ref(), + std::path::Path::new(file!()) + .parent() + .unwrap() + .join($path_str) + .to_string_lossy() + .into(), + ), + ); + }}; +} diff --git a/crates/bevy_asset/src/io/file/file_watcher.rs b/crates/bevy_asset/src/io/file/file_watcher.rs index 7f2d622135932..d701b225beda7 100644 --- a/crates/bevy_asset/src/io/file/file_watcher.rs +++ b/crates/bevy_asset/src/io/file/file_watcher.rs @@ -13,6 +13,11 @@ use notify_debouncer_full::{ }; use std::path::{Path, PathBuf}; +/// An [`AssetWatcher`] that watches the filesystem for changes to asset files in a given root folder and emits [`AssetSourceEvent`] +/// for each relevant change. This uses [`notify_debouncer_full`] to retrieve "debounced" filesystem events. +/// "Debouncing" defines a time window to hold on to events and then removes duplicate events that fall into this window. +/// This introduces a small delay in processing events, but it helps reduce event duplicates. A small delay is also necessary +/// on some systems to avoid processing a change event before it has actually been applied. 
pub struct FileWatcher { _watcher: Debouncer, } @@ -23,159 +28,248 @@ impl FileWatcher { sender: Sender, debounce_wait_time: Duration, ) -> Result { - let owned_root = root.clone(); - let mut debouncer = new_debouncer( + let root = super::get_base_path().join(root); + let watcher = new_asset_event_debouncer( + root.clone(), debounce_wait_time, - None, - move |result: DebounceEventResult| { - match result { - Ok(events) => { - for event in events.iter() { - match event.kind { - notify::EventKind::Create(CreateKind::File) => { - let (path, is_meta) = - get_asset_path(&owned_root, &event.paths[0]); + FileEventHandler { + root, + sender, + last_event: None, + }, + )?; + Ok(FileWatcher { _watcher: watcher }) + } +} + +impl AssetWatcher for FileWatcher {} + +pub(crate) fn get_asset_path(root: &Path, absolute_path: &Path) -> (PathBuf, bool) { + let relative_path = absolute_path.strip_prefix(root).unwrap(); + let is_meta = relative_path + .extension() + .map(|e| e == "meta") + .unwrap_or(false); + let asset_path = if is_meta { + relative_path.with_extension("") + } else { + relative_path.to_owned() + }; + (asset_path, is_meta) +} + +/// This is a bit more abstracted than it normally would be because we want to try _very hard_ not to duplicate this +/// event management logic across filesystem-driven [`AssetWatcher`] impls. Each operating system / platform behaves +/// a little differently and this is the result of a delicate balancing act that we should only perform once. +pub(crate) fn new_asset_event_debouncer( + root: PathBuf, + debounce_wait_time: Duration, + mut handler: impl FilesystemEventHandler, +) -> Result, notify::Error> { + let root = super::get_base_path().join(root); + let mut debouncer = new_debouncer( + debounce_wait_time, + None, + move |result: DebounceEventResult| { + match result { + Ok(events) => { + handler.begin(); + for event in events.iter() { + match event.kind { + notify::EventKind::Create(CreateKind::File) => { + if let Some((path, is_meta)) = handler.get_path(&event.paths[0]) { if is_meta { - sender.send(AssetSourceEvent::AddedMeta(path)).unwrap(); + handler.handle( + &event.paths, + AssetSourceEvent::AddedMeta(path), + ); } else { - sender.send(AssetSourceEvent::AddedAsset(path)).unwrap(); + handler.handle( + &event.paths, + AssetSourceEvent::AddedAsset(path), + ); } } - notify::EventKind::Create(CreateKind::Folder) => { - let (path, _) = get_asset_path(&owned_root, &event.paths[0]); - sender.send(AssetSourceEvent::AddedFolder(path)).unwrap(); + } + notify::EventKind::Create(CreateKind::Folder) => { + if let Some((path, _)) = handler.get_path(&event.paths[0]) { + handler + .handle(&event.paths, AssetSourceEvent::AddedFolder(path)); } - notify::EventKind::Access(AccessKind::Close(AccessMode::Write)) => { - let (path, is_meta) = - get_asset_path(&owned_root, &event.paths[0]); + } + notify::EventKind::Access(AccessKind::Close(AccessMode::Write)) => { + if let Some((path, is_meta)) = handler.get_path(&event.paths[0]) { if is_meta { - sender.send(AssetSourceEvent::ModifiedMeta(path)).unwrap(); + handler.handle( + &event.paths, + AssetSourceEvent::ModifiedMeta(path), + ); } else { - sender.send(AssetSourceEvent::ModifiedAsset(path)).unwrap(); + handler.handle( + &event.paths, + AssetSourceEvent::ModifiedAsset(path), + ); } } - notify::EventKind::Remove(RemoveKind::Any) | - // Because this is debounced over a reasonable period of time, "From" events are assumed to be "dangling" without - // a follow up "To" event. 
Without debouncing, "From" -> "To" -> "Both" events are emitted for renames. - // If a From is dangling, it is assumed to be "removed" from the context of the asset system. - notify::EventKind::Modify(ModifyKind::Name(RenameMode::From)) => { - let (path, is_meta) = - get_asset_path(&owned_root, &event.paths[0]); - sender - .send(AssetSourceEvent::RemovedUnknown { path, is_meta }) - .unwrap(); + } + // Because this is debounced over a reasonable period of time, Modify(ModifyKind::Name(RenameMode::From) + // events are assumed to be "dangling" without a follow up "To" event. Without debouncing, "From" -> "To" -> "Both" + // events are emitted for renames. If a From is dangling, it is assumed to be "removed" from the context of the asset + // system. + notify::EventKind::Remove(RemoveKind::Any) + | notify::EventKind::Modify(ModifyKind::Name(RenameMode::From)) => { + if let Some((path, is_meta)) = handler.get_path(&event.paths[0]) { + handler.handle( + &event.paths, + AssetSourceEvent::RemovedUnknown { path, is_meta }, + ); } - notify::EventKind::Create(CreateKind::Any) - | notify::EventKind::Modify(ModifyKind::Name(RenameMode::To)) => { - let (path, is_meta) = - get_asset_path(&owned_root, &event.paths[0]); - let event = if event.paths[0].is_dir() { + } + notify::EventKind::Create(CreateKind::Any) + | notify::EventKind::Modify(ModifyKind::Name(RenameMode::To)) => { + if let Some((path, is_meta)) = handler.get_path(&event.paths[0]) { + let asset_event = if event.paths[0].is_dir() { AssetSourceEvent::AddedFolder(path) } else if is_meta { AssetSourceEvent::AddedMeta(path) } else { AssetSourceEvent::AddedAsset(path) }; - sender.send(event).unwrap(); + handler.handle(&event.paths, asset_event); } - notify::EventKind::Modify(ModifyKind::Name(RenameMode::Both)) => { - let (old_path, old_is_meta) = - get_asset_path(&owned_root, &event.paths[0]); - let (new_path, new_is_meta) = - get_asset_path(&owned_root, &event.paths[1]); - // only the new "real" path is considered a directory - if event.paths[1].is_dir() { - sender - .send(AssetSourceEvent::RenamedFolder { - old: old_path, - new: new_path, - }) - .unwrap(); - } else { - match (old_is_meta, new_is_meta) { - (true, true) => { - sender - .send(AssetSourceEvent::RenamedMeta { - old: old_path, - new: new_path, - }) - .unwrap(); - } - (false, false) => { - sender - .send(AssetSourceEvent::RenamedAsset { - old: old_path, - new: new_path, - }) - .unwrap(); - } - (true, false) => { - error!( - "Asset metafile {old_path:?} was changed to asset file {new_path:?}, which is not supported. Try restarting your app to see if configuration is still valid" + } + notify::EventKind::Modify(ModifyKind::Name(RenameMode::Both)) => { + let Some((old_path, old_is_meta)) = + handler.get_path(&event.paths[0]) + else { + continue; + }; + let Some((new_path, new_is_meta)) = + handler.get_path(&event.paths[1]) + else { + continue; + }; + // only the new "real" path is considered a directory + if event.paths[1].is_dir() { + handler.handle( + &event.paths, + AssetSourceEvent::RenamedFolder { + old: old_path, + new: new_path, + }, + ); + } else { + match (old_is_meta, new_is_meta) { + (true, true) => { + handler.handle( + &event.paths, + AssetSourceEvent::RenamedMeta { + old: old_path, + new: new_path, + }, ); - } - (false, true) => { - error!( - "Asset file {old_path:?} was changed to meta file {new_path:?}, which is not supported. 
Try restarting your app to see if configuration is still valid" + } + (false, false) => { + handler.handle( + &event.paths, + AssetSourceEvent::RenamedAsset { + old: old_path, + new: new_path, + }, ); - } + } + (true, false) => { + error!( + "Asset metafile {old_path:?} was changed to asset file {new_path:?}, which is not supported. Try restarting your app to see if configuration is still valid" + ); + } + (false, true) => { + error!( + "Asset file {old_path:?} was changed to meta file {new_path:?}, which is not supported. Try restarting your app to see if configuration is still valid" + ); } } } - notify::EventKind::Modify(_) => { - let (path, is_meta) = - get_asset_path(&owned_root, &event.paths[0]); - if event.paths[0].is_dir() { - // modified folder means nothing in this case - } else if is_meta { - sender.send(AssetSourceEvent::ModifiedMeta(path)).unwrap(); - } else { - sender.send(AssetSourceEvent::ModifiedAsset(path)).unwrap(); - }; - } - notify::EventKind::Remove(RemoveKind::File) => { - let (path, is_meta) = - get_asset_path(&owned_root, &event.paths[0]); - if is_meta { - sender.send(AssetSourceEvent::RemovedMeta(path)).unwrap(); - } else { - sender.send(AssetSourceEvent::RemovedAsset(path)).unwrap(); - } - } - notify::EventKind::Remove(RemoveKind::Folder) => { - let (path, _) = get_asset_path(&owned_root, &event.paths[0]); - sender.send(AssetSourceEvent::RemovedFolder(path)).unwrap(); + } + notify::EventKind::Modify(_) => { + let Some((path, is_meta)) = handler.get_path(&event.paths[0]) + else { + continue; + }; + if event.paths[0].is_dir() { + // modified folder means nothing in this case + } else if is_meta { + handler + .handle(&event.paths, AssetSourceEvent::ModifiedMeta(path)); + } else { + handler.handle( + &event.paths, + AssetSourceEvent::ModifiedAsset(path), + ); + }; + } + notify::EventKind::Remove(RemoveKind::File) => { + let Some((path, is_meta)) = handler.get_path(&event.paths[0]) + else { + continue; + }; + if is_meta { + handler + .handle(&event.paths, AssetSourceEvent::RemovedMeta(path)); + } else { + handler + .handle(&event.paths, AssetSourceEvent::RemovedAsset(path)); } - _ => {} } + notify::EventKind::Remove(RemoveKind::Folder) => { + let Some((path, _)) = handler.get_path(&event.paths[0]) else { + continue; + }; + handler.handle(&event.paths, AssetSourceEvent::RemovedFolder(path)); + } + _ => {} } } - Err(errors) => errors.iter().for_each(|error| { - error!("Encountered a filesystem watcher error {error:?}"); - }), } - }, - )?; - debouncer.watcher().watch(&root, RecursiveMode::Recursive)?; - debouncer.cache().add_root(&root, RecursiveMode::Recursive); - Ok(Self { - _watcher: debouncer, - }) - } + Err(errors) => errors.iter().for_each(|error| { + error!("Encountered a filesystem watcher error {error:?}"); + }), + } + }, + )?; + debouncer.watcher().watch(&root, RecursiveMode::Recursive)?; + debouncer.cache().add_root(&root, RecursiveMode::Recursive); + Ok(debouncer) } -impl AssetWatcher for FileWatcher {} +pub(crate) struct FileEventHandler { + sender: crossbeam_channel::Sender, + root: PathBuf, + last_event: Option, +} -pub(crate) fn get_asset_path(root: &Path, absolute_path: &Path) -> (PathBuf, bool) { - let relative_path = absolute_path.strip_prefix(root).unwrap(); - let is_meta = relative_path - .extension() - .map(|e| e == "meta") - .unwrap_or(false); - let asset_path = if is_meta { - relative_path.with_extension("") - } else { - relative_path.to_owned() - }; - (asset_path, is_meta) +impl FilesystemEventHandler for FileEventHandler { + fn begin(&mut self) 
{ + self.last_event = None; + } + fn get_path(&self, absolute_path: &Path) -> Option<(PathBuf, bool)> { + Some(get_asset_path(&self.root, absolute_path)) + } + + fn handle(&mut self, _absolute_paths: &[PathBuf], event: AssetSourceEvent) { + if self.last_event.as_ref() != Some(&event) { + self.last_event = Some(event.clone()); + self.sender.send(event).unwrap(); + } + } +} + +pub(crate) trait FilesystemEventHandler: Send + Sync + 'static { + /// Called each time a set of debounced events is processed + fn begin(&mut self); + /// Returns an actual asset path (if one exists for the given `absolute_path`), as well as a [`bool`] that is + /// true if the `absolute_path` corresponds to a meta file. + fn get_path(&self, absolute_path: &Path) -> Option<(PathBuf, bool)>; + /// Handle the given event + fn handle(&mut self, absolute_paths: &[PathBuf], event: AssetSourceEvent); } diff --git a/crates/bevy_asset/src/io/file/mod.rs b/crates/bevy_asset/src/io/file/mod.rs index 859db14eda710..629fd7dd9c659 100644 --- a/crates/bevy_asset/src/io/file/mod.rs +++ b/crates/bevy_asset/src/io/file/mod.rs @@ -1,9 +1,11 @@ -#[cfg(feature = "filesystem_watcher")] +#[cfg(feature = "file_watcher")] mod file_watcher; +#[cfg(feature = "file_watcher")] +pub use file_watcher::*; use crate::io::{ - get_meta_path, AssetReader, AssetReaderError, AssetWatcher, AssetWriter, AssetWriterError, - PathStream, Reader, Writer, + get_meta_path, AssetReader, AssetReaderError, AssetWriter, AssetWriterError, PathStream, + Reader, Writer, }; use async_fs::{read_dir, File}; use bevy_utils::BoxedFuture; @@ -164,23 +166,6 @@ impl AssetReader for FileAssetReader { Ok(metadata.file_type().is_dir()) }) } - - fn watch_for_changes( - &self, - _event_sender: crossbeam_channel::Sender, - ) -> Option> { - #[cfg(feature = "filesystem_watcher")] - return Some(Box::new( - file_watcher::FileWatcher::new( - self.root_path.clone(), - _event_sender, - std::time::Duration::from_millis(300), - ) - .unwrap(), - )); - #[cfg(not(feature = "filesystem_watcher"))] - return None; - } } pub struct FileAssetWriter { diff --git a/crates/bevy_asset/src/io/gated.rs b/crates/bevy_asset/src/io/gated.rs index f200483759d7c..2c96399c0c0f7 100644 --- a/crates/bevy_asset/src/io/gated.rs +++ b/crates/bevy_asset/src/io/gated.rs @@ -96,11 +96,4 @@ impl AssetReader for GatedReader { ) -> BoxedFuture<'a, std::result::Result> { self.reader.is_directory(path) } - - fn watch_for_changes( - &self, - event_sender: Sender, - ) -> Option> { - self.reader.watch_for_changes(event_sender) - } } diff --git a/crates/bevy_asset/src/io/memory.rs b/crates/bevy_asset/src/io/memory.rs index 043592435192c..3dca5042dbd52 100644 --- a/crates/bevy_asset/src/io/memory.rs +++ b/crates/bevy_asset/src/io/memory.rs @@ -40,25 +40,31 @@ impl Dir { self.insert_meta(path, asset.as_bytes().to_vec()); } - pub fn insert_asset(&self, path: &Path, asset: Vec) { + pub fn insert_asset(&self, path: &Path, value: impl Into) { let mut dir = self.clone(); if let Some(parent) = path.parent() { dir = self.get_or_insert_dir(parent); } dir.0.write().assets.insert( path.file_name().unwrap().to_string_lossy().to_string(), - Data(Arc::new((asset, path.to_owned()))), + Data { + value: value.into(), + path: path.to_owned(), + }, ); } - pub fn insert_meta(&self, path: &Path, asset: Vec) { + pub fn insert_meta(&self, path: &Path, value: impl Into) { let mut dir = self.clone(); if let Some(parent) = path.parent() { dir = self.get_or_insert_dir(parent); } dir.0.write().metadata.insert( 
path.file_name().unwrap().to_string_lossy().to_string(), - Data(Arc::new((asset, path.to_owned()))), + Data { + value: value.into(), + path: path.to_owned(), + }, ); } @@ -117,11 +123,16 @@ impl Dir { pub struct DirStream { dir: Dir, index: usize, + dir_index: usize, } impl DirStream { fn new(dir: Dir) -> Self { - Self { dir, index: 0 } + Self { + dir, + index: 0, + dir_index: 0, + } } } @@ -133,10 +144,17 @@ impl Stream for DirStream { _cx: &mut std::task::Context<'_>, ) -> Poll> { let this = self.get_mut(); - let index = this.index; - this.index += 1; let dir = this.dir.0.read(); - Poll::Ready(dir.assets.values().nth(index).map(|d| d.path().to_owned())) + + let dir_index = this.dir_index; + if let Some(dir_path) = dir.dirs.keys().nth(dir_index).map(|d| dir.path.join(d)) { + this.dir_index += 1; + Poll::Ready(Some(dir_path)) + } else { + let index = this.index; + this.index += 1; + Poll::Ready(dir.assets.values().nth(index).map(|d| d.path().to_owned())) + } } } @@ -149,14 +167,45 @@ pub struct MemoryAssetReader { /// Asset data stored in a [`Dir`]. #[derive(Clone, Debug)] -pub struct Data(Arc<(Vec, PathBuf)>); +pub struct Data { + path: PathBuf, + value: Value, +} + +/// Stores either an allocated vec of bytes or a static array of bytes. +#[derive(Clone, Debug)] +pub enum Value { + Vec(Arc>), + Static(&'static [u8]), +} impl Data { fn path(&self) -> &Path { - &self.0 .1 + &self.path } - fn data(&self) -> &[u8] { - &self.0 .0 + fn value(&self) -> &[u8] { + match &self.value { + Value::Vec(vec) => vec, + Value::Static(value) => value, + } + } +} + +impl From> for Value { + fn from(value: Vec) -> Self { + Self::Vec(Arc::new(value)) + } +} + +impl From<&'static [u8]> for Value { + fn from(value: &'static [u8]) -> Self { + Self::Static(value) + } +} + +impl From<&'static [u8; N]> for Value { + fn from(value: &'static [u8; N]) -> Self { + Self::Static(value) } } @@ -171,10 +220,11 @@ impl AsyncRead for DataReader { cx: &mut std::task::Context<'_>, buf: &mut [u8], ) -> std::task::Poll> { - if self.bytes_read >= self.data.data().len() { + if self.bytes_read >= self.data.value().len() { Poll::Ready(Ok(0)) } else { - let n = ready!(Pin::new(&mut &self.data.data()[self.bytes_read..]).poll_read(cx, buf))?; + let n = + ready!(Pin::new(&mut &self.data.value()[self.bytes_read..]).poll_read(cx, buf))?; self.bytes_read += n; Poll::Ready(Ok(n)) } @@ -196,7 +246,7 @@ impl AssetReader for MemoryAssetReader { }); reader }) - .ok_or(AssetReaderError::NotFound(PathBuf::new())) + .ok_or_else(|| AssetReaderError::NotFound(path.to_path_buf())) }) } @@ -214,7 +264,7 @@ impl AssetReader for MemoryAssetReader { }); reader }) - .ok_or(AssetReaderError::NotFound(PathBuf::new())) + .ok_or_else(|| AssetReaderError::NotFound(path.to_path_buf())) }) } @@ -229,7 +279,7 @@ impl AssetReader for MemoryAssetReader { let stream: Box = Box::new(DirStream::new(dir)); stream }) - .ok_or(AssetReaderError::NotFound(PathBuf::new())) + .ok_or_else(|| AssetReaderError::NotFound(path.to_path_buf())) }) } @@ -239,13 +289,6 @@ impl AssetReader for MemoryAssetReader { ) -> BoxedFuture<'a, std::result::Result> { Box::pin(async move { Ok(self.root.get_dir(path).is_some()) }) } - - fn watch_for_changes( - &self, - _event_sender: crossbeam_channel::Sender, - ) -> Option> { - None - } } #[cfg(test)] @@ -263,12 +306,12 @@ pub mod test { dir.insert_asset(a_path, a_data.clone()); let asset = dir.get_asset(a_path).unwrap(); assert_eq!(asset.path(), a_path); - assert_eq!(asset.data(), a_data); + assert_eq!(asset.value(), a_data); 
dir.insert_meta(a_path, a_meta.clone()); let meta = dir.get_metadata(a_path).unwrap(); assert_eq!(meta.path(), a_path); - assert_eq!(meta.data(), a_meta); + assert_eq!(meta.value(), a_meta); let b_path = Path::new("x/y/b.txt"); let b_data = "b".as_bytes().to_vec(); @@ -278,10 +321,10 @@ pub mod test { let asset = dir.get_asset(b_path).unwrap(); assert_eq!(asset.path(), b_path); - assert_eq!(asset.data(), b_data); + assert_eq!(asset.value(), b_data); let meta = dir.get_metadata(b_path).unwrap(); assert_eq!(meta.path(), b_path); - assert_eq!(meta.data(), b_meta); + assert_eq!(meta.value(), b_meta); } } diff --git a/crates/bevy_asset/src/io/mod.rs b/crates/bevy_asset/src/io/mod.rs index a29902c5837b2..14e52cddcb597 100644 --- a/crates/bevy_asset/src/io/mod.rs +++ b/crates/bevy_asset/src/io/mod.rs @@ -1,5 +1,6 @@ #[cfg(target_os = "android")] pub mod android; +pub mod embedded; #[cfg(not(target_arch = "wasm32"))] pub mod file; pub mod gated; @@ -8,13 +9,12 @@ pub mod processor_gated; #[cfg(target_arch = "wasm32")] pub mod wasm; -mod provider; +mod source; pub use futures_lite::{AsyncReadExt, AsyncWriteExt}; -pub use provider::*; +pub use source::*; use bevy_utils::BoxedFuture; -use crossbeam_channel::Sender; use futures_io::{AsyncRead, AsyncWrite}; use futures_lite::{ready, Stream}; use std::{ @@ -65,13 +65,6 @@ pub trait AssetReader: Send + Sync + 'static { path: &'a Path, ) -> BoxedFuture<'a, Result>; - /// Returns an Asset watcher that will send events on the given channel. - /// If this reader does not support watching for changes, this will return [`None`]. - fn watch_for_changes( - &self, - event_sender: Sender, - ) -> Option>; - /// Reads asset metadata bytes at the given `path` into a [`Vec`]. This is a convenience /// function that wraps [`AssetReader::read_meta`] by default. fn read_meta_bytes<'a>( @@ -179,7 +172,7 @@ pub trait AssetWriter: Send + Sync + 'static { } /// An "asset source change event" that occurs whenever asset (or asset metadata) is created/added/removed -#[derive(Clone, Debug)] +#[derive(Clone, Debug, PartialEq, Eq)] pub enum AssetSourceEvent { /// An asset at this path was added. AddedAsset(PathBuf), @@ -218,8 +211,6 @@ pub enum AssetSourceEvent { /// A handle to an "asset watcher" process, that will listen for and emit [`AssetSourceEvent`] values for as long as /// [`AssetWatcher`] has not been dropped. -/// -/// See [`AssetReader::watch_for_changes`]. pub trait AssetWatcher: Send + Sync + 'static {} /// An [`AsyncRead`] implementation capable of reading a [`Vec`]. diff --git a/crates/bevy_asset/src/io/processor_gated.rs b/crates/bevy_asset/src/io/processor_gated.rs index 1e578771f723e..1705900aeb9e1 100644 --- a/crates/bevy_asset/src/io/processor_gated.rs +++ b/crates/bevy_asset/src/io/processor_gated.rs @@ -1,5 +1,5 @@ use crate::{ - io::{AssetReader, AssetReaderError, PathStream, Reader}, + io::{AssetReader, AssetReaderError, AssetSourceId, PathStream, Reader}, processor::{AssetProcessorData, ProcessStatus}, AssetPath, }; @@ -15,13 +15,19 @@ use std::{path::Path, pin::Pin, sync::Arc}; /// [`AssetProcessor`]: crate::processor::AssetProcessor pub struct ProcessorGatedReader { reader: Box, + source: AssetSourceId<'static>, processor_data: Arc, } impl ProcessorGatedReader { /// Creates a new [`ProcessorGatedReader`]. 
- pub fn new(reader: Box, processor_data: Arc) -> Self { + pub fn new( + source: AssetSourceId<'static>, + reader: Box, + processor_data: Arc, + ) -> Self { Self { + source, processor_data, reader, } @@ -31,12 +37,12 @@ impl ProcessorGatedReader { /// while it is held. async fn get_transaction_lock( &self, - path: &Path, + path: &AssetPath<'static>, ) -> Result, AssetReaderError> { let infos = self.processor_data.asset_infos.read().await; let info = infos - .get(&AssetPath::from_path(path.to_path_buf())) - .ok_or_else(|| AssetReaderError::NotFound(path.to_owned()))?; + .get(path) + .ok_or_else(|| AssetReaderError::NotFound(path.path().to_owned()))?; Ok(info.file_transaction_lock.read_arc().await) } } @@ -47,20 +53,20 @@ impl AssetReader for ProcessorGatedReader { path: &'a Path, ) -> BoxedFuture<'a, Result>, AssetReaderError>> { Box::pin(async move { - trace!("Waiting for processing to finish before reading {:?}", path); - let process_result = self.processor_data.wait_until_processed(path).await; + let asset_path = AssetPath::from(path.to_path_buf()).with_source(self.source.clone()); + trace!("Waiting for processing to finish before reading {asset_path}"); + let process_result = self + .processor_data + .wait_until_processed(asset_path.clone()) + .await; match process_result { ProcessStatus::Processed => {} ProcessStatus::Failed | ProcessStatus::NonExistent => { - return Err(AssetReaderError::NotFound(path.to_owned())) + return Err(AssetReaderError::NotFound(path.to_owned())); } } - trace!( - "Processing finished with {:?}, reading {:?}", - process_result, - path - ); - let lock = self.get_transaction_lock(path).await?; + trace!("Processing finished with {asset_path}, reading {process_result:?}",); + let lock = self.get_transaction_lock(&asset_path).await?; let asset_reader = self.reader.read(path).await?; let reader: Box> = Box::new(TransactionLockedReader::new(asset_reader, lock)); @@ -73,23 +79,20 @@ impl AssetReader for ProcessorGatedReader { path: &'a Path, ) -> BoxedFuture<'a, Result>, AssetReaderError>> { Box::pin(async move { - trace!( - "Waiting for processing to finish before reading meta {:?}", - path - ); - let process_result = self.processor_data.wait_until_processed(path).await; + let asset_path = AssetPath::from(path.to_path_buf()).with_source(self.source.clone()); + trace!("Waiting for processing to finish before reading meta for {asset_path}",); + let process_result = self + .processor_data + .wait_until_processed(asset_path.clone()) + .await; match process_result { ProcessStatus::Processed => {} ProcessStatus::Failed | ProcessStatus::NonExistent => { return Err(AssetReaderError::NotFound(path.to_owned())); } } - trace!( - "Processing finished with {:?}, reading meta {:?}", - process_result, - path - ); - let lock = self.get_transaction_lock(path).await?; + trace!("Processing finished with {process_result:?}, reading meta for {asset_path}",); + let lock = self.get_transaction_lock(&asset_path).await?; let meta_reader = self.reader.read_meta(path).await?; let reader: Box> = Box::new(TransactionLockedReader::new(meta_reader, lock)); Ok(reader) @@ -127,13 +130,6 @@ impl AssetReader for ProcessorGatedReader { Ok(result) }) } - - fn watch_for_changes( - &self, - event_sender: crossbeam_channel::Sender, - ) -> Option> { - self.reader.watch_for_changes(event_sender) - } } /// An [`AsyncRead`] impl that will hold its asset's transaction lock until [`TransactionLockedReader`] is dropped. 
diff --git a/crates/bevy_asset/src/io/provider.rs b/crates/bevy_asset/src/io/provider.rs deleted file mode 100644 index d41d8248ce042..0000000000000 --- a/crates/bevy_asset/src/io/provider.rs +++ /dev/null @@ -1,190 +0,0 @@ -use bevy_ecs::system::Resource; -use bevy_utils::HashMap; - -use crate::{ - io::{AssetReader, AssetWriter}, - AssetPlugin, -}; - -/// A reference to an "asset provider", which maps to an [`AssetReader`] and/or [`AssetWriter`]. -#[derive(Default, Clone, Debug)] -pub enum AssetProvider { - /// The default asset provider - #[default] - Default, - /// A custom / named asset provider - Custom(String), -} - -/// A [`Resource`] that hold (repeatable) functions capable of producing new [`AssetReader`] and [`AssetWriter`] instances -/// for a given [`AssetProvider`]. -#[derive(Resource, Default)] -pub struct AssetProviders { - readers: HashMap Box + Send + Sync>>, - writers: HashMap Box + Send + Sync>>, - default_file_source: Option, - default_file_destination: Option, -} - -impl AssetProviders { - /// Inserts a new `get_reader` function with the given `provider` name. This function will be used to create new [`AssetReader`]s - /// when they are requested for the given `provider`. - pub fn insert_reader( - &mut self, - provider: &str, - get_reader: impl FnMut() -> Box + Send + Sync + 'static, - ) { - self.readers - .insert(provider.to_string(), Box::new(get_reader)); - } - /// Inserts a new `get_reader` function with the given `provider` name. This function will be used to create new [`AssetReader`]s - /// when they are requested for the given `provider`. - pub fn with_reader( - mut self, - provider: &str, - get_reader: impl FnMut() -> Box + Send + Sync + 'static, - ) -> Self { - self.insert_reader(provider, get_reader); - self - } - /// Inserts a new `get_writer` function with the given `provider` name. This function will be used to create new [`AssetWriter`]s - /// when they are requested for the given `provider`. - pub fn insert_writer( - &mut self, - provider: &str, - get_writer: impl FnMut() -> Box + Send + Sync + 'static, - ) { - self.writers - .insert(provider.to_string(), Box::new(get_writer)); - } - /// Inserts a new `get_writer` function with the given `provider` name. This function will be used to create new [`AssetWriter`]s - /// when they are requested for the given `provider`. - pub fn with_writer( - mut self, - provider: &str, - get_writer: impl FnMut() -> Box + Send + Sync + 'static, - ) -> Self { - self.insert_writer(provider, get_writer); - self - } - /// Returns the default "asset source" path for the [`FileAssetReader`] and [`FileAssetWriter`]. - /// - /// [`FileAssetReader`]: crate::io::file::FileAssetReader - /// [`FileAssetWriter`]: crate::io::file::FileAssetWriter - pub fn default_file_source(&self) -> &str { - self.default_file_source - .as_deref() - .unwrap_or(AssetPlugin::DEFAULT_FILE_SOURCE) - } - - /// Sets the default "asset source" path for the [`FileAssetReader`] and [`FileAssetWriter`]. - /// - /// [`FileAssetReader`]: crate::io::file::FileAssetReader - /// [`FileAssetWriter`]: crate::io::file::FileAssetWriter - pub fn with_default_file_source(mut self, path: String) -> Self { - self.default_file_source = Some(path); - self - } - - /// Sets the default "asset destination" path for the [`FileAssetReader`] and [`FileAssetWriter`]. 
- /// - /// [`FileAssetReader`]: crate::io::file::FileAssetReader - /// [`FileAssetWriter`]: crate::io::file::FileAssetWriter - pub fn with_default_file_destination(mut self, path: String) -> Self { - self.default_file_destination = Some(path); - self - } - - /// Returns the default "asset destination" path for the [`FileAssetReader`] and [`FileAssetWriter`]. - /// - /// [`FileAssetReader`]: crate::io::file::FileAssetReader - /// [`FileAssetWriter`]: crate::io::file::FileAssetWriter - pub fn default_file_destination(&self) -> &str { - self.default_file_destination - .as_deref() - .unwrap_or(AssetPlugin::DEFAULT_FILE_DESTINATION) - } - - /// Returns a new "source" [`AssetReader`] for the given [`AssetProvider`]. - pub fn get_source_reader(&mut self, provider: &AssetProvider) -> Box { - match provider { - AssetProvider::Default => { - #[cfg(all(not(target_arch = "wasm32"), not(target_os = "android")))] - let reader = super::file::FileAssetReader::new(self.default_file_source()); - #[cfg(target_arch = "wasm32")] - let reader = super::wasm::HttpWasmAssetReader::new(self.default_file_source()); - #[cfg(target_os = "android")] - let reader = super::android::AndroidAssetReader; - Box::new(reader) - } - AssetProvider::Custom(provider) => { - let get_reader = self - .readers - .get_mut(provider) - .unwrap_or_else(|| panic!("Asset Provider {} does not exist", provider)); - (get_reader)() - } - } - } - /// Returns a new "destination" [`AssetReader`] for the given [`AssetProvider`]. - pub fn get_destination_reader(&mut self, provider: &AssetProvider) -> Box { - match provider { - AssetProvider::Default => { - #[cfg(all(not(target_arch = "wasm32"), not(target_os = "android")))] - let reader = super::file::FileAssetReader::new(self.default_file_destination()); - #[cfg(target_arch = "wasm32")] - let reader = super::wasm::HttpWasmAssetReader::new(self.default_file_destination()); - #[cfg(target_os = "android")] - let reader = super::android::AndroidAssetReader; - Box::new(reader) - } - AssetProvider::Custom(provider) => { - let get_reader = self - .readers - .get_mut(provider) - .unwrap_or_else(|| panic!("Asset Provider {} does not exist", provider)); - (get_reader)() - } - } - } - /// Returns a new "source" [`AssetWriter`] for the given [`AssetProvider`]. - pub fn get_source_writer(&mut self, provider: &AssetProvider) -> Box { - match provider { - AssetProvider::Default => { - #[cfg(all(not(target_arch = "wasm32"), not(target_os = "android")))] - return Box::new(super::file::FileAssetWriter::new( - self.default_file_source(), - )); - #[cfg(any(target_arch = "wasm32", target_os = "android"))] - panic!("Writing assets isn't supported on this platform yet"); - } - AssetProvider::Custom(provider) => { - let get_writer = self - .writers - .get_mut(provider) - .unwrap_or_else(|| panic!("Asset Provider {} does not exist", provider)); - (get_writer)() - } - } - } - /// Returns a new "destination" [`AssetWriter`] for the given [`AssetProvider`]. 
- pub fn get_destination_writer(&mut self, provider: &AssetProvider) -> Box { - match provider { - AssetProvider::Default => { - #[cfg(all(not(target_arch = "wasm32"), not(target_os = "android")))] - return Box::new(super::file::FileAssetWriter::new( - self.default_file_destination(), - )); - #[cfg(any(target_arch = "wasm32", target_os = "android"))] - panic!("Writing assets isn't supported on this platform yet"); - } - AssetProvider::Custom(provider) => { - let get_writer = self - .writers - .get_mut(provider) - .unwrap_or_else(|| panic!("Asset Provider {} does not exist", provider)); - (get_writer)() - } - } - } -} diff --git a/crates/bevy_asset/src/io/source.rs b/crates/bevy_asset/src/io/source.rs new file mode 100644 index 0000000000000..ea07f8d39a4f3 --- /dev/null +++ b/crates/bevy_asset/src/io/source.rs @@ -0,0 +1,553 @@ +use crate::{ + io::{ + processor_gated::ProcessorGatedReader, AssetReader, AssetSourceEvent, AssetWatcher, + AssetWriter, + }, + processor::AssetProcessorData, +}; +use bevy_ecs::system::Resource; +use bevy_log::{error, warn}; +use bevy_utils::{CowArc, Duration, HashMap}; +use std::{fmt::Display, hash::Hash, sync::Arc}; +use thiserror::Error; + +/// A reference to an "asset source", which maps to an [`AssetReader`] and/or [`AssetWriter`]. +/// +/// * [`AssetSourceId::Default`] corresponds to "default asset paths" that don't specify a source: `/path/to/asset.png` +/// * [`AssetSourceId::Name`] corresponds to asset paths that _do_ specify a source: `remote://path/to/asset.png`, where `remote` is the name. +#[derive(Default, Clone, Debug, Eq)] +pub enum AssetSourceId<'a> { + /// The default asset source. + #[default] + Default, + /// A non-default named asset source. + Name(CowArc<'a, str>), +} + +impl<'a> Display for AssetSourceId<'a> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self.as_str() { + None => write!(f, "AssetSourceId::Default"), + Some(v) => write!(f, "AssetSourceId::Name({v})"), + } + } +} + +impl<'a> AssetSourceId<'a> { + /// Creates a new [`AssetSourceId`] + pub fn new(source: Option>>) -> AssetSourceId<'a> { + match source { + Some(source) => AssetSourceId::Name(source.into()), + None => AssetSourceId::Default, + } + } + + /// Returns [`None`] if this is [`AssetSourceId::Default`] and [`Some`] containing the + /// the name if this is [`AssetSourceId::Name`]. + pub fn as_str(&self) -> Option<&str> { + match self { + AssetSourceId::Default => None, + AssetSourceId::Name(v) => Some(v), + } + } + + /// If this is not already an owned / static id, create one. Otherwise, it will return itself (with a static lifetime). + pub fn into_owned(self) -> AssetSourceId<'static> { + match self { + AssetSourceId::Default => AssetSourceId::Default, + AssetSourceId::Name(v) => AssetSourceId::Name(v.into_owned()), + } + } + + /// Clones into an owned [`AssetSourceId<'static>`]. + /// This is equivalent to `.clone().into_owned()`. 
+ #[inline] + pub fn clone_owned(&self) -> AssetSourceId<'static> { + self.clone().into_owned() + } +} + +impl From<&'static str> for AssetSourceId<'static> { + fn from(value: &'static str) -> Self { + AssetSourceId::Name(value.into()) + } +} + +impl<'a, 'b> From<&'a AssetSourceId<'b>> for AssetSourceId<'b> { + fn from(value: &'a AssetSourceId<'b>) -> Self { + value.clone() + } +} + +impl From> for AssetSourceId<'static> { + fn from(value: Option<&'static str>) -> Self { + match value { + Some(value) => AssetSourceId::Name(value.into()), + None => AssetSourceId::Default, + } + } +} + +impl From for AssetSourceId<'static> { + fn from(value: String) -> Self { + AssetSourceId::Name(value.into()) + } +} + +impl<'a> Hash for AssetSourceId<'a> { + fn hash(&self, state: &mut H) { + self.as_str().hash(state); + } +} + +impl<'a> PartialEq for AssetSourceId<'a> { + fn eq(&self, other: &Self) -> bool { + self.as_str().eq(&other.as_str()) + } +} + +/// Metadata about an "asset source", such as how to construct the [`AssetReader`] and [`AssetWriter`] for the source, +/// and whether or not the source is processed. +#[derive(Default)] +pub struct AssetSourceBuilder { + pub reader: Option Box + Send + Sync>>, + pub writer: Option Option> + Send + Sync>>, + pub watcher: Option< + Box< + dyn FnMut(crossbeam_channel::Sender) -> Option> + + Send + + Sync, + >, + >, + pub processed_reader: Option Box + Send + Sync>>, + pub processed_writer: Option Option> + Send + Sync>>, + pub processed_watcher: Option< + Box< + dyn FnMut(crossbeam_channel::Sender) -> Option> + + Send + + Sync, + >, + >, +} + +impl AssetSourceBuilder { + /// Builds a new [`AssetSource`] with the given `id`. If `watch` is true, the unprocessed source will watch for changes. + /// If `watch_processed` is true, the processed source will watch for changes. + pub fn build( + &mut self, + id: AssetSourceId<'static>, + watch: bool, + watch_processed: bool, + ) -> Option { + let reader = (self.reader.as_mut()?)(); + let writer = self.writer.as_mut().and_then(|w| (w)()); + let processed_writer = self.processed_writer.as_mut().and_then(|w| (w)()); + let mut source = AssetSource { + id: id.clone(), + reader, + writer, + processed_reader: self.processed_reader.as_mut().map(|r| (r)()), + processed_writer, + event_receiver: None, + watcher: None, + processed_event_receiver: None, + processed_watcher: None, + }; + + if watch { + let (sender, receiver) = crossbeam_channel::unbounded(); + match self.watcher.as_mut().and_then(|w|(w)(sender)) { + Some(w) => { + source.watcher = Some(w); + source.event_receiver = Some(receiver); + }, + None => warn!("{id} does not have an AssetWatcher configured. Consider enabling the `file_watcher` feature. Note that Web and Android do not currently support watching assets."), + } + } + + if watch_processed { + let (sender, receiver) = crossbeam_channel::unbounded(); + match self.processed_watcher.as_mut().and_then(|w|(w)(sender)) { + Some(w) => { + source.processed_watcher = Some(w); + source.processed_event_receiver = Some(receiver); + }, + None => warn!("{id} does not have a processed AssetWatcher configured. Consider enabling the `file_watcher` feature. Note that Web and Android do not currently support watching assets."), + } + } + Some(source) + } + + /// Will use the given `reader` function to construct unprocessed [`AssetReader`] instances. 
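Because source construction has to be repeatable, the builder stores closures rather than concrete reader/writer instances. The sketch below drives `build` by hand and is not part of the patch; the `other_assets` folder and the `other` source name are invented, and it assumes the non-wasm, non-Android `FileAssetReader` is available.

```rust
use bevy_asset::io::{file::FileAssetReader, AssetSourceBuilder, AssetSourceId};

fn main() {
    // Only an unprocessed reader is configured, so the resulting source
    // has no writer and will not process assets.
    let mut builder = AssetSourceBuilder::default()
        .with_reader(|| Box::new(FileAssetReader::new("other_assets")));

    // `watch` and `watch_processed` are false, so no AssetWatcher is created
    // and no "missing file_watcher feature" warning is logged.
    let source = builder
        .build(AssetSourceId::from("other"), false, false)
        .expect("a reader closure was configured");
    assert!(!source.should_process());
}
```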
+ pub fn with_reader( + mut self, + reader: impl FnMut() -> Box + Send + Sync + 'static, + ) -> Self { + self.reader = Some(Box::new(reader)); + self + } + + /// Will use the given `writer` function to construct unprocessed [`AssetWriter`] instances. + pub fn with_writer( + mut self, + writer: impl FnMut() -> Option> + Send + Sync + 'static, + ) -> Self { + self.writer = Some(Box::new(writer)); + self + } + + /// Will use the given `watcher` function to construct unprocessed [`AssetWatcher`] instances. + pub fn with_watcher( + mut self, + watcher: impl FnMut(crossbeam_channel::Sender) -> Option> + + Send + + Sync + + 'static, + ) -> Self { + self.watcher = Some(Box::new(watcher)); + self + } + + /// Will use the given `reader` function to construct processed [`AssetReader`] instances. + pub fn with_processed_reader( + mut self, + reader: impl FnMut() -> Box + Send + Sync + 'static, + ) -> Self { + self.processed_reader = Some(Box::new(reader)); + self + } + + /// Will use the given `writer` function to construct processed [`AssetWriter`] instances. + pub fn with_processed_writer( + mut self, + writer: impl FnMut() -> Option> + Send + Sync + 'static, + ) -> Self { + self.processed_writer = Some(Box::new(writer)); + self + } + + /// Will use the given `watcher` function to construct processed [`AssetWatcher`] instances. + pub fn with_processed_watcher( + mut self, + watcher: impl FnMut(crossbeam_channel::Sender) -> Option> + + Send + + Sync + + 'static, + ) -> Self { + self.processed_watcher = Some(Box::new(watcher)); + self + } + + /// Returns a builder containing the "platform default source" for the given `path` and `processed_path`. + /// For most platforms, this will use [`FileAssetReader`](crate::io::file::FileAssetReader) / [`FileAssetWriter`](crate::io::file::FileAssetWriter), + /// but some platforms (such as Android) have their own default readers / writers / watchers. + pub fn platform_default(path: &str, processed_path: &str) -> Self { + Self::default() + .with_reader(AssetSource::get_default_reader(path.to_string())) + .with_writer(AssetSource::get_default_writer(path.to_string())) + .with_watcher(AssetSource::get_default_watcher( + path.to_string(), + Duration::from_millis(300), + )) + .with_processed_reader(AssetSource::get_default_reader(processed_path.to_string())) + .with_processed_writer(AssetSource::get_default_writer(processed_path.to_string())) + .with_processed_watcher(AssetSource::get_default_watcher( + processed_path.to_string(), + Duration::from_millis(300), + )) + } +} + +/// A [`Resource`] that hold (repeatable) functions capable of producing new [`AssetReader`] and [`AssetWriter`] instances +/// for a given asset source. +#[derive(Resource, Default)] +pub struct AssetSourceBuilders { + sources: HashMap, AssetSourceBuilder>, + default: Option, +} + +impl AssetSourceBuilders { + /// Inserts a new builder with the given `id` + pub fn insert(&mut self, id: impl Into>, source: AssetSourceBuilder) { + match id.into() { + AssetSourceId::Default => { + self.default = Some(source); + } + AssetSourceId::Name(name) => { + self.sources.insert(name, source); + } + } + } + + /// Gets a mutable builder with the given `id`, if it exists. + pub fn get_mut<'a, 'b>( + &'a mut self, + id: impl Into>, + ) -> Option<&'a mut AssetSourceBuilder> { + match id.into() { + AssetSourceId::Default => self.default.as_mut(), + AssetSourceId::Name(name) => self.sources.get_mut(&name.into_owned()), + } + } + + /// Builds a new [`AssetSources`] collection. 
If `watch` is true, the unprocessed sources will watch for changes. + /// If `watch_processed` is true, the processed sources will watch for changes. + pub fn build_sources(&mut self, watch: bool, watch_processed: bool) -> AssetSources { + let mut sources = HashMap::new(); + for (id, source) in &mut self.sources { + if let Some(data) = source.build( + AssetSourceId::Name(id.clone_owned()), + watch, + watch_processed, + ) { + sources.insert(id.clone_owned(), data); + } + } + + AssetSources { + sources, + default: self + .default + .as_mut() + .and_then(|p| p.build(AssetSourceId::Default, watch, watch_processed)) + .expect(MISSING_DEFAULT_SOURCE), + } + } + + /// Initializes the default [`AssetSourceBuilder`] if it has not already been set. + pub fn init_default_source(&mut self, path: &str, processed_path: &str) { + self.default + .get_or_insert_with(|| AssetSourceBuilder::platform_default(path, processed_path)); + } +} + +/// A collection of unprocessed and processed [`AssetReader`], [`AssetWriter`], and [`AssetWatcher`] instances +/// for a specific asset source, identified by an [`AssetSourceId`]. +pub struct AssetSource { + id: AssetSourceId<'static>, + reader: Box, + writer: Option>, + processed_reader: Option>, + processed_writer: Option>, + watcher: Option>, + processed_watcher: Option>, + event_receiver: Option>, + processed_event_receiver: Option>, +} + +impl AssetSource { + /// Starts building a new [`AssetSource`]. + pub fn build() -> AssetSourceBuilder { + AssetSourceBuilder::default() + } + + /// Returns this source's id. + #[inline] + pub fn id(&self) -> AssetSourceId<'static> { + self.id.clone() + } + + /// Return's this source's unprocessed [`AssetReader`]. + #[inline] + pub fn reader(&self) -> &dyn AssetReader { + &*self.reader + } + + /// Return's this source's unprocessed [`AssetWriter`], if it exists. + #[inline] + pub fn writer(&self) -> Result<&dyn AssetWriter, MissingAssetWriterError> { + self.writer + .as_deref() + .ok_or_else(|| MissingAssetWriterError(self.id.clone_owned())) + } + + /// Return's this source's processed [`AssetReader`], if it exists. + #[inline] + pub fn processed_reader(&self) -> Result<&dyn AssetReader, MissingProcessedAssetReaderError> { + self.processed_reader + .as_deref() + .ok_or_else(|| MissingProcessedAssetReaderError(self.id.clone_owned())) + } + + /// Return's this source's processed [`AssetWriter`], if it exists. + #[inline] + pub fn processed_writer(&self) -> Result<&dyn AssetWriter, MissingProcessedAssetWriterError> { + self.processed_writer + .as_deref() + .ok_or_else(|| MissingProcessedAssetWriterError(self.id.clone_owned())) + } + + /// Return's this source's unprocessed event receiver, if the source is currently watching for changes. + #[inline] + pub fn event_receiver(&self) -> Option<&crossbeam_channel::Receiver> { + self.event_receiver.as_ref() + } + + /// Return's this source's processed event receiver, if the source is currently watching for changes. + #[inline] + pub fn processed_event_receiver( + &self, + ) -> Option<&crossbeam_channel::Receiver> { + self.processed_event_receiver.as_ref() + } + + /// Returns true if the assets in this source should be processed. + #[inline] + pub fn should_process(&self) -> bool { + self.processed_writer.is_some() + } + + /// Returns a builder function for this platform's default [`AssetReader`]. `path` is the relative path to + /// the asset root. 
+ pub fn get_default_reader(_path: String) -> impl FnMut() -> Box + Send + Sync { + move || { + #[cfg(all(not(target_arch = "wasm32"), not(target_os = "android")))] + return Box::new(super::file::FileAssetReader::new(&_path)); + #[cfg(target_arch = "wasm32")] + return Box::new(super::wasm::HttpWasmAssetReader::new(&_path)); + #[cfg(target_os = "android")] + return Box::new(super::android::AndroidAssetReader); + } + } + + /// Returns a builder function for this platform's default [`AssetWriter`]. `path` is the relative path to + /// the asset root. This will return [`None`] if this platform does not support writing assets by default. + pub fn get_default_writer( + _path: String, + ) -> impl FnMut() -> Option> + Send + Sync { + move || { + #[cfg(all(not(target_arch = "wasm32"), not(target_os = "android")))] + return Some(Box::new(super::file::FileAssetWriter::new(&_path))); + #[cfg(any(target_arch = "wasm32", target_os = "android"))] + return None; + } + } + + /// Returns a builder function for this platform's default [`AssetWatcher`]. `path` is the relative path to + /// the asset root. This will return [`None`] if this platform does not support watching assets by default. + /// `file_debounce_time` is the amount of time to wait (and debounce duplicate events) before returning an event. + /// Higher durations reduce duplicates but increase the amount of time before a change event is processed. If the + /// duration is set too low, some systems might surface events _before_ their filesystem has the changes. + #[allow(unused)] + pub fn get_default_watcher( + path: String, + file_debounce_wait_time: Duration, + ) -> impl FnMut(crossbeam_channel::Sender) -> Option> + + Send + + Sync { + move |sender: crossbeam_channel::Sender| { + #[cfg(all( + feature = "file_watcher", + not(target_arch = "wasm32"), + not(target_os = "android") + ))] + return Some(Box::new( + super::file::FileWatcher::new( + std::path::PathBuf::from(path.clone()), + sender, + file_debounce_wait_time, + ) + .unwrap(), + )); + #[cfg(any( + not(feature = "file_watcher"), + target_arch = "wasm32", + target_os = "android" + ))] + return None; + } + } + + /// This will cause processed [`AssetReader`] futures (such as [`AssetReader::read`]) to wait until + /// the [`AssetProcessor`](crate::AssetProcessor) has finished processing the requested asset. + pub fn gate_on_processor(&mut self, processor_data: Arc) { + if let Some(reader) = self.processed_reader.take() { + self.processed_reader = Some(Box::new(ProcessorGatedReader::new( + self.id(), + reader, + processor_data, + ))); + } + } +} + +/// A collection of [`AssetSources`]. +pub struct AssetSources { + sources: HashMap, AssetSource>, + default: AssetSource, +} + +impl AssetSources { + /// Gets the [`AssetSource`] with the given `id`, if it exists. + pub fn get<'a, 'b>( + &'a self, + id: impl Into>, + ) -> Result<&'a AssetSource, MissingAssetSourceError> { + match id.into().into_owned() { + AssetSourceId::Default => Ok(&self.default), + AssetSourceId::Name(name) => self + .sources + .get(&name) + .ok_or_else(|| MissingAssetSourceError(AssetSourceId::Name(name))), + } + } + + /// Iterates all asset sources in the collection (including the default source). + pub fn iter(&self) -> impl Iterator { + self.sources.values().chain(Some(&self.default)) + } + + /// Mutably iterates all asset sources in the collection (including the default source). 
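Once built, lookups go through the `AssetSources` collection rather than a single reader field. The sketch below is illustrative only: the `remote` id is hypothetical, and it assumes a collection obtained from `AssetSourceBuilders::build_sources` as shown above.

```rust
use bevy_asset::io::{AssetSourceId, AssetSources};

fn inspect(sources: &AssetSources) {
    // The default source is guaranteed to exist.
    let default = sources
        .get(AssetSourceId::Default)
        .expect("the default source always exists");
    // Writing and processed reading are optional per source, so both are fallible.
    println!(
        "default source: writable = {}, processed = {}",
        default.writer().is_ok(),
        default.should_process()
    );

    // Named sources that were never registered surface a MissingAssetSourceError.
    if let Err(err) = sources.get("remote") {
        println!("{err}");
    }
}
```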
+ pub fn iter_mut(&mut self) -> impl Iterator { + self.sources.values_mut().chain(Some(&mut self.default)) + } + + /// Iterates all processed asset sources in the collection (including the default source). + pub fn iter_processed(&self) -> impl Iterator { + self.iter().filter(|p| p.should_process()) + } + + /// Mutably iterates all processed asset sources in the collection (including the default source). + pub fn iter_processed_mut(&mut self) -> impl Iterator { + self.iter_mut().filter(|p| p.should_process()) + } + + /// Iterates over the [`AssetSourceId`] of every [`AssetSource`] in the collection (including the default source). + pub fn ids(&self) -> impl Iterator> + '_ { + self.sources + .keys() + .map(|k| AssetSourceId::Name(k.clone_owned())) + .chain(Some(AssetSourceId::Default)) + } + + /// This will cause processed [`AssetReader`] futures (such as [`AssetReader::read`]) to wait until + /// the [`AssetProcessor`](crate::AssetProcessor) has finished processing the requested asset. + pub fn gate_on_processor(&mut self, processor_data: Arc) { + for source in self.iter_processed_mut() { + source.gate_on_processor(processor_data.clone()); + } + } +} + +/// An error returned when an [`AssetSource`] does not exist for a given id. +#[derive(Error, Debug)] +#[error("Asset Source '{0}' does not exist")] +pub struct MissingAssetSourceError(AssetSourceId<'static>); + +/// An error returned when an [`AssetWriter`] does not exist for a given id. +#[derive(Error, Debug)] +#[error("Asset Source '{0}' does not have an AssetWriter.")] +pub struct MissingAssetWriterError(AssetSourceId<'static>); + +/// An error returned when a processed [`AssetReader`] does not exist for a given id. +#[derive(Error, Debug)] +#[error("Asset Source '{0}' does not have a processed AssetReader.")] +pub struct MissingProcessedAssetReaderError(AssetSourceId<'static>); + +/// An error returned when a processed [`AssetWriter`] does not exist for a given id. +#[derive(Error, Debug)] +#[error("Asset Source '{0}' does not have a processed AssetWriter.")] +pub struct MissingProcessedAssetWriterError(AssetSourceId<'static>); + +const MISSING_DEFAULT_SOURCE: &str = + "A default AssetSource is required. 
Add one to `AssetSourceBuilders`"; diff --git a/crates/bevy_asset/src/io/wasm.rs b/crates/bevy_asset/src/io/wasm.rs index f0acbae067d6c..99ff39799b087 100644 --- a/crates/bevy_asset/src/io/wasm.rs +++ b/crates/bevy_asset/src/io/wasm.rs @@ -1,6 +1,5 @@ use crate::io::{ - get_meta_path, AssetReader, AssetReaderError, AssetWatcher, EmptyPathStream, PathStream, - Reader, VecReader, + get_meta_path, AssetReader, AssetReaderError, EmptyPathStream, PathStream, Reader, VecReader, }; use bevy_log::error; use bevy_utils::BoxedFuture; @@ -99,11 +98,4 @@ impl AssetReader for HttpWasmAssetReader { error!("Reading directories is not supported with the HttpWasmAssetReader"); Box::pin(async move { Ok(false) }) } - - fn watch_for_changes( - &self, - _event_sender: crossbeam_channel::Sender, - ) -> Option> { - None - } } diff --git a/crates/bevy_asset/src/lib.rs b/crates/bevy_asset/src/lib.rs index 31bdc3a2e62ca..148dc40e56d01 100644 --- a/crates/bevy_asset/src/lib.rs +++ b/crates/bevy_asset/src/lib.rs @@ -8,7 +8,7 @@ pub mod saver; pub mod prelude { #[doc(hidden)] pub use crate::{ - Asset, AssetApp, AssetEvent, AssetId, AssetPlugin, AssetServer, Assets, Handle, + Asset, AssetApp, AssetEvent, AssetId, AssetMode, AssetPlugin, AssetServer, Assets, Handle, UntypedHandle, }; } @@ -38,7 +38,7 @@ pub use server::*; pub use bevy_utils::BoxedFuture; use crate::{ - io::{processor_gated::ProcessorGatedReader, AssetProvider, AssetProviders}, + io::{embedded::EmbeddedAssetRegistry, AssetSourceBuilder, AssetSourceBuilders, AssetSourceId}, processor::{AssetProcessor, Process}, }; use bevy_app::{App, First, MainScheduleOrder, Plugin, PostUpdate, Startup}; @@ -50,141 +50,124 @@ use bevy_ecs::{ use bevy_reflect::{FromReflect, GetTypeRegistration, Reflect, TypePath}; use std::{any::TypeId, sync::Arc}; -/// Provides "asset" loading and processing functionality. An [`Asset`] is a "runtime value" that is loaded from an [`AssetProvider`], +/// Provides "asset" loading and processing functionality. An [`Asset`] is a "runtime value" that is loaded from an [`AssetSource`], /// which can be something like a filesystem, a network, etc. /// -/// Supports flexible "modes", such as [`AssetPlugin::Processed`] and -/// [`AssetPlugin::Unprocessed`] that enable using the asset workflow that best suits your project. -pub enum AssetPlugin { - /// Loads assets without any "preprocessing" from the configured asset `source` (defaults to the `assets` folder). - Unprocessed { - source: AssetProvider, - watch_for_changes: bool, - }, - /// Loads "processed" assets from a given `destination` source (defaults to the `imported_assets/Default` folder). This should - /// generally only be used when distributing apps. Use [`AssetPlugin::ProcessedDev`] to develop apps that process assets, - /// then switch to [`AssetPlugin::Processed`] when deploying the apps. - Processed { - destination: AssetProvider, - watch_for_changes: bool, - }, - /// Starts an [`AssetProcessor`] in the background that reads assets from the `source` provider (defaults to the `assets` folder), - /// processes them according to their [`AssetMeta`], and writes them to the `destination` provider (defaults to the `imported_assets/Default` folder). +/// Supports flexible "modes", such as [`AssetMode::Processed`] and +/// [`AssetMode::Unprocessed`] that enable using the asset workflow that best suits your project. +/// +/// [`AssetSource`]: crate::io::AssetSource +pub struct AssetPlugin { + /// The default file path to use (relative to the project root) for unprocessed assets. 
+ pub file_path: String, + /// The default file path to use (relative to the project root) for processed assets. + pub processed_file_path: String, + /// If set, will override the default "watch for changes" setting. By default "watch for changes" will be `false` unless + /// the `watch` cargo feature is set. `watch` can be enabled manually, or it will be automatically enabled if a specific watcher + /// like `file_watcher` is enabled. + /// + /// Most use cases should leave this set to [`None`] and enable a specific watcher feature such as `file_watcher` to enable + /// watching for dev-scenarios. + pub watch_for_changes_override: Option, + /// The [`AssetMode`] to use for this server. + pub mode: AssetMode, +} + +pub enum AssetMode { + /// Loads assets from their [`AssetSource`]'s default [`AssetReader`] without any "preprocessing". + /// + /// [`AssetReader`]: crate::io::AssetReader + /// [`AssetSource`]: crate::io::AssetSource + Unprocessed, + /// Loads assets from their final processed [`AssetReader`]. This should generally only be used when distributing apps. + /// Use [`AssetMode::ProcessedDev`] to develop apps that process assets, then switch to [`AssetMode::Processed`] when deploying the apps. /// - /// By default this will hot reload changes to the `source` provider, resulting in reprocessing the asset and reloading it in the [`App`]. + /// [`AssetReader`]: crate::io::AssetReader + Processed, + /// Starts an [`AssetProcessor`] in the background that reads assets from their unprocessed [`AssetSource`] (defaults to the `assets` folder), + /// processes them according to their [`AssetMeta`], and writes them to their processed [`AssetSource`] (defaults to the `imported_assets/Default` folder). + /// + /// Apps will load assets from the processed [`AssetSource`]. Asset loads will wait until the asset processor has finished processing the requested asset. + /// + /// This should generally be used in combination with the `file_watcher` cargo feature to support hot-reloading and re-processing assets. 
/// /// [`AssetMeta`]: crate::meta::AssetMeta - ProcessedDev { - source: AssetProvider, - destination: AssetProvider, - watch_for_changes: bool, - }, + /// [`AssetSource`]: crate::io::AssetSource + ProcessedDev, } impl Default for AssetPlugin { fn default() -> Self { - Self::unprocessed() + Self { + mode: AssetMode::Unprocessed, + file_path: Self::DEFAULT_UNPROCESSED_FILE_PATH.to_string(), + processed_file_path: Self::DEFAULT_PROCESSED_FILE_PATH.to_string(), + watch_for_changes_override: None, + } } } impl AssetPlugin { - const DEFAULT_FILE_SOURCE: &'static str = "assets"; + const DEFAULT_UNPROCESSED_FILE_PATH: &'static str = "assets"; /// NOTE: this is in the Default sub-folder to make this forward compatible with "import profiles" /// and to allow us to put the "processor transaction log" at `imported_assets/log` - const DEFAULT_FILE_DESTINATION: &'static str = "imported_assets/Default"; - - /// Returns the default [`AssetPlugin::Processed`] configuration - pub fn processed() -> Self { - Self::Processed { - destination: Default::default(), - watch_for_changes: false, - } - } - - /// Returns the default [`AssetPlugin::ProcessedDev`] configuration - pub fn processed_dev() -> Self { - Self::ProcessedDev { - source: Default::default(), - destination: Default::default(), - watch_for_changes: true, - } - } - - /// Returns the default [`AssetPlugin::Unprocessed`] configuration - pub fn unprocessed() -> Self { - Self::Unprocessed { - source: Default::default(), - watch_for_changes: false, - } - } - - /// Enables watching for changes, which will hot-reload assets when they change. - pub fn watch_for_changes(mut self) -> Self { - match &mut self { - AssetPlugin::Unprocessed { - watch_for_changes, .. - } - | AssetPlugin::Processed { - watch_for_changes, .. - } - | AssetPlugin::ProcessedDev { - watch_for_changes, .. 
- } => *watch_for_changes = true, - }; - self - } + const DEFAULT_PROCESSED_FILE_PATH: &'static str = "imported_assets/Default"; } impl Plugin for AssetPlugin { fn build(&self, app: &mut App) { - app.init_schedule(UpdateAssets) - .init_schedule(AssetEvents) - .init_resource::(); + app.init_schedule(UpdateAssets).init_schedule(AssetEvents); + let embedded = EmbeddedAssetRegistry::default(); + { + let mut sources = app + .world + .get_resource_or_insert_with::(Default::default); + sources.init_default_source(&self.file_path, &self.processed_file_path); + embedded.register_source(&mut sources); + } { - match self { - AssetPlugin::Unprocessed { - source, - watch_for_changes, - } => { - let source_reader = app - .world - .resource_mut::() - .get_source_reader(source); - app.insert_resource(AssetServer::new(source_reader, *watch_for_changes)); + let mut watch = cfg!(feature = "watch"); + if let Some(watch_override) = self.watch_for_changes_override { + watch = watch_override; + } + match self.mode { + AssetMode::Unprocessed => { + let mut builders = app.world.resource_mut::(); + let sources = builders.build_sources(watch, false); + app.insert_resource(AssetServer::new( + sources, + AssetServerMode::Unprocessed, + watch, + )); } - AssetPlugin::Processed { - destination, - watch_for_changes, - } => { - let destination_reader = app - .world - .resource_mut::() - .get_destination_reader(destination); - app.insert_resource(AssetServer::new(destination_reader, *watch_for_changes)); + AssetMode::Processed => { + let mut builders = app.world.resource_mut::(); + let sources = builders.build_sources(false, watch); + app.insert_resource(AssetServer::new( + sources, + AssetServerMode::Processed, + watch, + )); } - AssetPlugin::ProcessedDev { - source, - destination, - watch_for_changes, - } => { - let mut asset_providers = app.world.resource_mut::(); - let processor = AssetProcessor::new(&mut asset_providers, source, destination); - let destination_reader = asset_providers.get_destination_reader(source); - // the main asset server gates loads based on asset state - let gated_reader = - ProcessorGatedReader::new(destination_reader, processor.data.clone()); + AssetMode::ProcessedDev => { + let mut builders = app.world.resource_mut::(); + let processor = AssetProcessor::new(&mut builders); + let mut sources = builders.build_sources(false, watch); + sources.gate_on_processor(processor.data.clone()); // the main asset server shares loaders with the processor asset server app.insert_resource(AssetServer::new_with_loaders( - Box::new(gated_reader), + sources, processor.server().data.loaders.clone(), - *watch_for_changes, + AssetServerMode::Processed, + watch, )) .insert_resource(processor) .add_systems(Startup, AssetProcessor::start); } } } - app.init_asset::() + app.insert_resource(embedded) + .init_asset::() .init_asset::<()>() .configure_sets( UpdateAssets, @@ -254,6 +237,12 @@ pub trait AssetApp { fn register_asset_loader(&mut self, loader: L) -> &mut Self; /// Registers the given `processor` in the [`App`]'s [`AssetProcessor`]. fn register_asset_processor(&mut self, processor: P) -> &mut Self; + /// Registers the given [`AssetSourceBuilder`] with the given `id`. + fn register_asset_source( + &mut self, + id: impl Into>, + source: AssetSourceBuilder, + ) -> &mut Self; /// Sets the default asset processor for the given `extension`. fn set_default_asset_processor(&mut self, extension: &str) -> &mut Self; /// Initializes the given loader in the [`App`]'s [`AssetServer`]. 
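With the enum variants gone, opting into processing or overriding hot-reload is plain struct configuration. A minimal sketch (not part of the patch), mirroring the plugin setup used in the tests below:

```rust
use bevy_app::App;
use bevy_asset::prelude::*;
use bevy_core::TaskPoolPlugin;

fn main() {
    App::new().add_plugins((
        TaskPoolPlugin::default(),
        AssetPlugin {
            // Load already-processed assets; use AssetMode::ProcessedDev while developing.
            mode: AssetMode::Processed,
            // `None` defers to the `watch` cargo feature (enabled by e.g. `file_watcher`).
            watch_for_changes_override: None,
            // Keep the default `assets` / `imported_assets/Default` folders.
            ..Default::default()
        },
    ));
}
```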
@@ -350,6 +339,21 @@ impl AssetApp for App { } self } + + fn register_asset_source( + &mut self, + id: impl Into>, + source: AssetSourceBuilder, + ) -> &mut Self { + { + let mut sources = self + .world + .get_resource_or_insert_with(AssetSourceBuilders::default); + sources.insert(id, source); + } + + self + } } /// A system set that holds all "track asset" operations. @@ -366,55 +370,6 @@ pub struct UpdateAssets; #[derive(Debug, Hash, PartialEq, Eq, Clone, ScheduleLabel)] pub struct AssetEvents; -/// Loads an "internal" asset by embedding the string stored in the given `path_str` and associates it with the given handle. -#[macro_export] -macro_rules! load_internal_asset { - ($app: ident, $handle: expr, $path_str: expr, $loader: expr) => {{ - let mut assets = $app.world.resource_mut::<$crate::Assets<_>>(); - assets.insert($handle, ($loader)( - include_str!($path_str), - std::path::Path::new(file!()) - .parent() - .unwrap() - .join($path_str) - .to_string_lossy() - )); - }}; - // we can't support params without variadic arguments, so internal assets with additional params can't be hot-reloaded - ($app: ident, $handle: ident, $path_str: expr, $loader: expr $(, $param:expr)+) => {{ - let mut assets = $app.world.resource_mut::<$crate::Assets<_>>(); - assets.insert($handle, ($loader)( - include_str!($path_str), - std::path::Path::new(file!()) - .parent() - .unwrap() - .join($path_str) - .to_string_lossy(), - $($param),+ - )); - }}; -} - -/// Loads an "internal" binary asset by embedding the bytes stored in the given `path_str` and associates it with the given handle. -#[macro_export] -macro_rules! load_internal_binary_asset { - ($app: ident, $handle: expr, $path_str: expr, $loader: expr) => {{ - let mut assets = $app.world.resource_mut::<$crate::Assets<_>>(); - assets.insert( - $handle, - ($loader)( - include_bytes!($path_str).as_ref(), - std::path::Path::new(file!()) - .parent() - .unwrap() - .join($path_str) - .to_string_lossy() - .into(), - ), - ); - }}; -} - #[cfg(test)] mod tests { use crate::{ @@ -424,12 +379,11 @@ mod tests { io::{ gated::{GateOpener, GatedReader}, memory::{Dir, MemoryAssetReader}, - Reader, + AssetSource, AssetSourceId, Reader, }, loader::{AssetLoader, LoadContext}, - Asset, AssetApp, AssetEvent, AssetId, AssetPath, AssetPlugin, AssetProvider, - AssetProviders, AssetServer, Assets, DependencyLoadState, LoadState, - RecursiveDependencyLoadState, + Asset, AssetApp, AssetEvent, AssetId, AssetPath, AssetPlugin, AssetServer, Assets, + DependencyLoadState, LoadState, RecursiveDependencyLoadState, }; use bevy_app::{App, Update}; use bevy_core::TaskPoolPlugin; @@ -534,17 +488,14 @@ mod tests { fn test_app(dir: Dir) -> (App, GateOpener) { let mut app = App::new(); let (gated_memory_reader, gate_opener) = GatedReader::new(MemoryAssetReader { root: dir }); - app.insert_resource( - AssetProviders::default() - .with_reader("Test", move || Box::new(gated_memory_reader.clone())), + app.register_asset_source( + AssetSourceId::Default, + AssetSource::build().with_reader(move || Box::new(gated_memory_reader.clone())), ) .add_plugins(( TaskPoolPlugin::default(), LogPlugin::default(), - AssetPlugin::Unprocessed { - source: AssetProvider::Custom("Test".to_string()), - watch_for_changes: false, - }, + AssetPlugin::default(), )); (app, gate_opener) } diff --git a/crates/bevy_asset/src/loader.rs b/crates/bevy_asset/src/loader.rs index 04be5ce834508..94ab97593dd28 100644 --- a/crates/bevy_asset/src/loader.rs +++ b/crates/bevy_asset/src/loader.rs @@ -1,11 +1,12 @@ use crate::{ - 
io::{AssetReaderError, Reader}, + io::{AssetReaderError, MissingAssetSourceError, MissingProcessedAssetReaderError, Reader}, meta::{ loader_settings_meta_transform, AssetHash, AssetMeta, AssetMetaDyn, ProcessedInfoMinimal, Settings, }, path::AssetPath, - Asset, AssetLoadError, AssetServer, Assets, Handle, UntypedAssetId, UntypedHandle, + Asset, AssetLoadError, AssetServer, AssetServerMode, Assets, Handle, UntypedAssetId, + UntypedHandle, }; use bevy_ecs::world::World; use bevy_utils::{BoxedFuture, CowArc, HashMap, HashSet}; @@ -367,7 +368,7 @@ impl<'a> LoadContext<'a> { ) -> Handle { let label = label.into(); let loaded_asset: ErasedLoadedAsset = loaded_asset.into(); - let labeled_path = self.asset_path.with_label(label.clone()); + let labeled_path = self.asset_path.clone().with_label(label.clone()); let handle = self .asset_server .get_or_create_path_handle(labeled_path, None); @@ -385,7 +386,7 @@ impl<'a> LoadContext<'a> { /// /// See [`AssetPath`] for more on labeled assets. pub fn has_labeled_asset<'b>(&self, label: impl Into>) -> bool { - let path = self.asset_path.with_label(label.into()); + let path = self.asset_path.clone().with_label(label.into()); self.asset_server.get_handle_untyped(&path).is_some() } @@ -412,15 +413,21 @@ impl<'a> LoadContext<'a> { } /// Gets the source asset path for this load context. - pub async fn read_asset_bytes<'b>( - &mut self, - path: &'b Path, + pub async fn read_asset_bytes<'b, 'c>( + &'b mut self, + path: impl Into>, ) -> Result, ReadAssetBytesError> { - let mut reader = self.asset_server.reader().read(path).await?; + let path = path.into(); + let source = self.asset_server.get_source(path.source())?; + let asset_reader = match self.asset_server.mode() { + AssetServerMode::Unprocessed { .. } => source.reader(), + AssetServerMode::Processed { .. } => source.processed_reader()?, + }; + let mut reader = asset_reader.read(path.path()).await?; let hash = if self.populate_hashes { // NOTE: ensure meta is read while the asset bytes reader is still active to ensure transactionality // See `ProcessorGatedReader` for more info - let meta_bytes = self.asset_server.reader().read_meta_bytes(path).await?; + let meta_bytes = asset_reader.read_meta_bytes(path.path()).await?; let minimal: ProcessedInfoMinimal = ron::de::from_bytes(&meta_bytes) .map_err(DeserializeMetaError::DeserializeMinimal)?; let processed_info = minimal @@ -432,8 +439,7 @@ impl<'a> LoadContext<'a> { }; let mut bytes = Vec::new(); reader.read_to_end(&mut bytes).await?; - self.loader_dependencies - .insert(AssetPath::from_path(path.to_owned()), hash); + self.loader_dependencies.insert(path.clone_owned(), hash); Ok(bytes) } @@ -480,7 +486,7 @@ impl<'a> LoadContext<'a> { &mut self, label: impl Into>, ) -> Handle { - let path = self.asset_path.with_label(label); + let path = self.asset_path.clone().with_label(label); let handle = self.asset_server.get_or_create_path_handle::(path, None); self.dependencies.insert(handle.id().untyped()); handle @@ -542,6 +548,10 @@ pub enum ReadAssetBytesError { DeserializeMetaError(#[from] DeserializeMetaError), #[error(transparent)] AssetReaderError(#[from] AssetReaderError), + #[error(transparent)] + MissingAssetSourceError(#[from] MissingAssetSourceError), + #[error(transparent)] + MissingProcessedAssetReaderError(#[from] MissingProcessedAssetReaderError), /// Encountered an I/O error while loading an asset. 
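For loader authors, the practical effect is that `read_asset_bytes` now accepts a full `AssetPath`, so auxiliary files can be pulled from any registered source. A hedged sketch (not part of the patch; the `config://` source and the file name are invented):

```rust
use bevy_asset::loader::{LoadContext, ReadAssetBytesError};

// Inside an AssetLoader::load implementation, read a sidecar file through a
// named source. The bytes are also recorded as a loader dependency.
async fn read_palette(load_context: &mut LoadContext<'_>) -> Result<Vec<u8>, ReadAssetBytesError> {
    load_context
        .read_asset_bytes("config://palettes/default.pal")
        .await
}
```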
#[error("Encountered an io error while loading asset: {0}")] Io(#[from] std::io::Error), diff --git a/crates/bevy_asset/src/path.rs b/crates/bevy_asset/src/path.rs index efd12041148e3..11168ca245b50 100644 --- a/crates/bevy_asset/src/path.rs +++ b/crates/bevy_asset/src/path.rs @@ -1,3 +1,4 @@ +use crate::io::AssetSourceId; use bevy_reflect::{ std_traits::ReflectDefault, utility::NonGenericTypeInfoCell, FromReflect, FromType, GetTypeRegistration, Reflect, ReflectDeserialize, ReflectFromPtr, ReflectFromReflect, @@ -12,10 +13,13 @@ use std::{ ops::Deref, path::{Path, PathBuf}, }; +use thiserror::Error; /// Represents a path to an asset in a "virtual filesystem". /// -/// Asset paths consist of two main parts: +/// Asset paths consist of three main parts: +/// * [`AssetPath::source`]: The name of the [`AssetSource`](crate::io::AssetSource) to load the asset from. +/// This is optional. If one is not set the default source will be used (which is the `assets` folder by default). /// * [`AssetPath::path`]: The "virtual filesystem path" pointing to an asset source file. /// * [`AssetPath::label`]: An optional "named sub asset". When assets are loaded, they are /// allowed to load "sub assets" of any type, which are identified by a named "label". @@ -33,20 +37,24 @@ use std::{ /// # struct Scene; /// # /// # let asset_server: AssetServer = panic!(); -/// // This loads the `my_scene.scn` base asset. +/// // This loads the `my_scene.scn` base asset from the default asset source. /// let scene: Handle = asset_server.load("my_scene.scn"); /// -/// // This loads the `PlayerMesh` labeled asset from the `my_scene.scn` base asset. +/// // This loads the `PlayerMesh` labeled asset from the `my_scene.scn` base asset in the default asset source. /// let mesh: Handle = asset_server.load("my_scene.scn#PlayerMesh"); +/// +/// // This loads the `my_scene.scn` base asset from a custom 'remote' asset source. +/// let scene: Handle = asset_server.load("remote://my_scene.scn"); /// ``` /// /// [`AssetPath`] implements [`From`] for `&'static str`, `&'static Path`, and `&'a String`, /// which allows us to optimize the static cases. /// This means that the common case of `asset_server.load("my_scene.scn")` when it creates and /// clones internal owned [`AssetPaths`](AssetPath). -/// This also means that you should use [`AssetPath::new`] in cases where `&str` is the explicit type. +/// This also means that you should use [`AssetPath::parse`] in cases where `&str` is the explicit type. #[derive(Eq, PartialEq, Hash, Clone, Default)] pub struct AssetPath<'a> { + source: AssetSourceId<'a>, path: CowArc<'a, Path>, label: Option>, } @@ -67,38 +75,128 @@ impl<'a> Display for AssetPath<'a> { } } +#[derive(Error, Debug, PartialEq, Eq)] +pub enum ParseAssetPathError { + #[error("Asset source must be followed by '://'")] + InvalidSourceSyntax, + #[error("Asset source must be at least one character. Either specify the source before the '://' or remove the `://`")] + MissingSource, + #[error("Asset label must be at least one character. 
Either specify the label after the '#' or remove the '#'")] + MissingLabel, +} + impl<'a> AssetPath<'a> { /// Creates a new [`AssetPath`] from a string in the asset path format: /// * An asset at the root: `"scene.gltf"` /// * An asset nested in some folders: `"some/path/scene.gltf"` /// * An asset with a "label": `"some/path/scene.gltf#Mesh0"` + /// * An asset with a custom "source": `"custom://some/path/scene.gltf#Mesh0"` /// /// Prefer [`From<'static str>`] for static strings, as this will prevent allocations /// and reference counting for [`AssetPath::into_owned`]. - pub fn new(asset_path: &'a str) -> AssetPath<'a> { - let (path, label) = Self::get_parts(asset_path); - Self { + /// + /// # Panics + /// Panics if the asset path is in an invalid format. Use [`AssetPath::try_parse`] for a fallible variant + pub fn parse(asset_path: &'a str) -> AssetPath<'a> { + Self::try_parse(asset_path).unwrap() + } + + /// Creates a new [`AssetPath`] from a string in the asset path format: + /// * An asset at the root: `"scene.gltf"` + /// * An asset nested in some folders: `"some/path/scene.gltf"` + /// * An asset with a "label": `"some/path/scene.gltf#Mesh0"` + /// * An asset with a custom "source": `"custom://some/path/scene.gltf#Mesh0"` + /// + /// Prefer [`From<'static str>`] for static strings, as this will prevent allocations + /// and reference counting for [`AssetPath::into_owned`]. + /// + /// This will return a [`ParseAssetPathError`] if `asset_path` is in an invalid format. + pub fn try_parse(asset_path: &'a str) -> Result, ParseAssetPathError> { + let (source, path, label) = Self::parse_internal(asset_path).unwrap(); + Ok(Self { + source: match source { + Some(source) => AssetSourceId::Name(CowArc::Borrowed(source)), + None => AssetSourceId::Default, + }, path: CowArc::Borrowed(path), label: label.map(CowArc::Borrowed), - } + }) } - fn get_parts(asset_path: &str) -> (&Path, Option<&str>) { - let mut parts = asset_path.splitn(2, '#'); - let path = Path::new(parts.next().expect("Path must be set.")); - let label = parts.next(); - (path, label) + fn parse_internal( + asset_path: &str, + ) -> Result<(Option<&str>, &Path, Option<&str>), ParseAssetPathError> { + let mut chars = asset_path.char_indices(); + let mut source_range = None; + let mut path_range = 0..asset_path.len(); + let mut label_range = None; + while let Some((index, char)) = chars.next() { + match char { + ':' => { + let (_, char) = chars + .next() + .ok_or(ParseAssetPathError::InvalidSourceSyntax)?; + if char != '/' { + return Err(ParseAssetPathError::InvalidSourceSyntax); + } + let (index, char) = chars + .next() + .ok_or(ParseAssetPathError::InvalidSourceSyntax)?; + if char != '/' { + return Err(ParseAssetPathError::InvalidSourceSyntax); + } + source_range = Some(0..index - 2); + path_range.start = index + 1; + } + '#' => { + path_range.end = index; + label_range = Some(index + 1..asset_path.len()); + break; + } + _ => {} + } + } + + let source = match source_range { + Some(source_range) => { + if source_range.is_empty() { + return Err(ParseAssetPathError::MissingSource); + } + Some(&asset_path[source_range]) + } + None => None, + }; + let label = match label_range { + Some(label_range) => { + if label_range.is_empty() { + return Err(ParseAssetPathError::MissingLabel); + } + Some(&asset_path[label_range]) + } + None => None, + }; + + let path = Path::new(&asset_path[path_range]); + Ok((source, path, label)) } /// Creates a new [`AssetPath`] from a [`Path`]. 
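A few concrete inputs and what the parser extracts from them may help; this sketch is not part of the patch and only exercises forms analogous to those in the `parse_asset_path` unit test further down.

```rust
use bevy_asset::{io::AssetSourceId, AssetPath};

fn main() {
    // No source prefix: the default source is used.
    let simple = AssetPath::parse("models/tree.gltf");
    assert_eq!(simple.source(), &AssetSourceId::Default);
    assert_eq!(simple.label(), None);

    // The source prefix and the label suffix are both split off.
    let remote = AssetPath::parse("remote://models/tree.gltf#Mesh0");
    assert_eq!(remote.source(), &AssetSourceId::from("remote"));
    assert_eq!(remote.label(), Some("Mesh0"));
}
```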
#[inline] - pub fn from_path(path: impl Into>) -> AssetPath<'a> { + pub fn from_path(path: &'a Path) -> AssetPath<'a> { AssetPath { - path: path.into(), + path: CowArc::Borrowed(path), + source: AssetSourceId::Default, label: None, } } + /// Gets the "asset source", if one was defined. If none was defined, the default source + /// will be used. + #[inline] + pub fn source(&self) -> &AssetSourceId { + &self.source + } + /// Gets the "sub-asset label". #[inline] pub fn label(&self) -> Option<&str> { @@ -115,6 +213,7 @@ impl<'a> AssetPath<'a> { #[inline] pub fn without_label(&self) -> AssetPath<'_> { Self { + source: self.source.clone(), path: self.path.clone(), label: None, } @@ -135,24 +234,62 @@ impl<'a> AssetPath<'a> { /// Returns this asset path with the given label. This will replace the previous /// label if it exists. #[inline] - pub fn with_label(&self, label: impl Into>) -> AssetPath<'a> { + pub fn with_label(self, label: impl Into>) -> AssetPath<'a> { AssetPath { - path: self.path.clone(), + source: self.source, + path: self.path, label: Some(label.into()), } } + /// Returns this asset path with the given asset source. This will replace the previous asset + /// source if it exists. + #[inline] + pub fn with_source(self, source: impl Into>) -> AssetPath<'a> { + AssetPath { + source: source.into(), + path: self.path, + label: self.label, + } + } + + /// Returns an [`AssetPath`] for the parent folder of this path, if there is a parent folder in the path. + pub fn parent(&self) -> Option> { + let path = match &self.path { + CowArc::Borrowed(path) => CowArc::Borrowed(path.parent()?), + CowArc::Static(path) => CowArc::Static(path.parent()?), + CowArc::Owned(path) => path.parent()?.to_path_buf().into(), + }; + Some(AssetPath { + source: self.source.clone(), + label: None, + path, + }) + } + /// Converts this into an "owned" value. If internally a value is borrowed, it will be cloned into an "owned [`Arc`]". - /// If it is already an "owned [`Arc`]", it will remain unchanged. + /// If internally a value is a static reference, the static reference will be used unchanged. + /// If internally a value is an "owned [`Arc`]", it will remain unchanged. /// /// [`Arc`]: std::sync::Arc pub fn into_owned(self) -> AssetPath<'static> { AssetPath { + source: self.source.into_owned(), path: self.path.into_owned(), label: self.label.map(|l| l.into_owned()), } } + /// Clones this into an "owned" value. If internally a value is borrowed, it will be cloned into an "owned [`Arc`]". + /// If internally a value is a static reference, the static reference will be used unchanged. + /// If internally a value is an "owned [`Arc`]", the [`Arc`] will be cloned. + /// + /// [`Arc`]: std::sync::Arc + #[inline] + pub fn clone_owned(&self) -> AssetPath<'static> { + self.clone().into_owned() + } + /// Returns the full extension (including multiple '.' values). 
/// Ex: Returns `"config.ron"` for `"my_asset.config.ron"` pub fn get_full_extension(&self) -> Option { @@ -176,8 +313,9 @@ impl<'a> AssetPath<'a> { impl From<&'static str> for AssetPath<'static> { #[inline] fn from(asset_path: &'static str) -> Self { - let (path, label) = Self::get_parts(asset_path); + let (source, path, label) = Self::parse_internal(asset_path).unwrap(); AssetPath { + source: source.into(), path: CowArc::Static(path), label: label.map(CowArc::Static), } @@ -187,14 +325,14 @@ impl From<&'static str> for AssetPath<'static> { impl<'a> From<&'a String> for AssetPath<'a> { #[inline] fn from(asset_path: &'a String) -> Self { - AssetPath::new(asset_path.as_str()) + AssetPath::parse(asset_path.as_str()) } } impl From for AssetPath<'static> { #[inline] fn from(asset_path: String) -> Self { - AssetPath::new(asset_path.as_str()).into_owned() + AssetPath::parse(asset_path.as_str()).into_owned() } } @@ -202,6 +340,7 @@ impl From<&'static Path> for AssetPath<'static> { #[inline] fn from(path: &'static Path) -> Self { Self { + source: AssetSourceId::Default, path: CowArc::Static(path), label: None, } @@ -212,6 +351,7 @@ impl From for AssetPath<'static> { #[inline] fn from(path: PathBuf) -> Self { Self { + source: AssetSourceId::Default, path: path.into(), label: None, } @@ -261,7 +401,7 @@ impl<'de> Visitor<'de> for AssetPathVisitor { where E: serde::de::Error, { - Ok(AssetPath::new(v).into_owned()) + Ok(AssetPath::parse(v).into_owned()) } fn visit_string(self, v: String) -> Result @@ -402,3 +542,36 @@ impl FromReflect for AssetPath<'static> { >(::as_any(reflect))?)) } } + +#[cfg(test)] +mod tests { + use crate::AssetPath; + use std::path::Path; + + #[test] + fn parse_asset_path() { + let result = AssetPath::parse_internal("a/b.test"); + assert_eq!(result, Ok((None, Path::new("a/b.test"), None))); + + let result = AssetPath::parse_internal("http://a/b.test"); + assert_eq!(result, Ok((Some("http"), Path::new("a/b.test"), None))); + + let result = AssetPath::parse_internal("http://a/b.test#Foo"); + assert_eq!( + result, + Ok((Some("http"), Path::new("a/b.test"), Some("Foo"))) + ); + + let result = AssetPath::parse_internal("http://"); + assert_eq!(result, Ok((Some("http"), Path::new(""), None))); + + let result = AssetPath::parse_internal("://x"); + assert_eq!(result, Err(crate::ParseAssetPathError::MissingSource)); + + let result = AssetPath::parse_internal("a/b.test#"); + assert_eq!(result, Err(crate::ParseAssetPathError::MissingLabel)); + + let result = AssetPath::parse_internal("http:/"); + assert_eq!(result, Err(crate::ParseAssetPathError::InvalidSourceSyntax)); + } +} diff --git a/crates/bevy_asset/src/processor/log.rs b/crates/bevy_asset/src/processor/log.rs index 0c1c3d93fbade..642de9b127142 100644 --- a/crates/bevy_asset/src/processor/log.rs +++ b/crates/bevy_asset/src/processor/log.rs @@ -1,15 +1,16 @@ +use crate::AssetPath; use async_fs::File; use bevy_log::error; use bevy_utils::HashSet; use futures_lite::{AsyncReadExt, AsyncWriteExt}; -use std::path::{Path, PathBuf}; +use std::path::PathBuf; use thiserror::Error; /// An in-memory representation of a single [`ProcessorTransactionLog`] entry. #[derive(Debug)] pub(crate) enum LogEntry { - BeginProcessing(PathBuf), - EndProcessing(PathBuf), + BeginProcessing(AssetPath<'static>), + EndProcessing(AssetPath<'static>), UnrecoverableError, } @@ -55,12 +56,12 @@ pub enum ValidateLogError { /// An error that occurs when validating individual [`ProcessorTransactionLog`] entries. 
#[derive(Error, Debug)] pub enum LogEntryError { - #[error("Encountered a duplicate process asset transaction: {0:?}")] - DuplicateTransaction(PathBuf), - #[error("A transaction was ended that never started {0:?}")] - EndedMissingTransaction(PathBuf), - #[error("An asset started processing but never finished: {0:?}")] - UnfinishedTransaction(PathBuf), + #[error("Encountered a duplicate process asset transaction: {0}")] + DuplicateTransaction(AssetPath<'static>), + #[error("A transaction was ended that never started {0}")] + EndedMissingTransaction(AssetPath<'static>), + #[error("An asset started processing but never finished: {0}")] + UnfinishedTransaction(AssetPath<'static>), } const LOG_PATH: &str = "imported_assets/log"; @@ -114,9 +115,13 @@ impl ProcessorTransactionLog { file.read_to_string(&mut string).await?; for line in string.lines() { if let Some(path_str) = line.strip_prefix(ENTRY_BEGIN) { - log_lines.push(LogEntry::BeginProcessing(PathBuf::from(path_str))); + log_lines.push(LogEntry::BeginProcessing( + AssetPath::parse(path_str).into_owned(), + )); } else if let Some(path_str) = line.strip_prefix(ENTRY_END) { - log_lines.push(LogEntry::EndProcessing(PathBuf::from(path_str))); + log_lines.push(LogEntry::EndProcessing( + AssetPath::parse(path_str).into_owned(), + )); } else if line.is_empty() { continue; } else { @@ -127,7 +132,7 @@ impl ProcessorTransactionLog { } pub(crate) async fn validate() -> Result<(), ValidateLogError> { - let mut transactions: HashSet = Default::default(); + let mut transactions: HashSet> = Default::default(); let mut errors: Vec = Vec::new(); let entries = Self::read().await?; for entry in entries { @@ -160,21 +165,27 @@ impl ProcessorTransactionLog { /// Logs the start of an asset being processed. If this is not followed at some point in the log by a closing [`ProcessorTransactionLog::end_processing`], /// in the next run of the processor the asset processing will be considered "incomplete" and it will be reprocessed. - pub(crate) async fn begin_processing(&mut self, path: &Path) -> Result<(), WriteLogError> { - self.write(&format!("{ENTRY_BEGIN}{}\n", path.to_string_lossy())) + pub(crate) async fn begin_processing( + &mut self, + path: &AssetPath<'_>, + ) -> Result<(), WriteLogError> { + self.write(&format!("{ENTRY_BEGIN}{path}\n")) .await .map_err(|e| WriteLogError { - log_entry: LogEntry::BeginProcessing(path.to_owned()), + log_entry: LogEntry::BeginProcessing(path.clone_owned()), error: e, }) } /// Logs the end of an asset being successfully processed. See [`ProcessorTransactionLog::begin_processing`]. 
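The log lines are written with `Display` and read back with `AssetPath::parse`, so entries survive a processor restart only if that pair round-trips. A small sketch of the invariant this code relies on (not part of the patch; it assumes `Display` renders the same `source://path#label` form that `parse` accepts):

```rust
use bevy_asset::AssetPath;

fn main() {
    let original = AssetPath::parse("remote://models/tree.gltf#Mesh0").into_owned();
    // What begin_processing/end_processing append after their entry prefix.
    let logged = format!("{original}");
    // What read() recovers when replaying the transaction log.
    let reparsed = AssetPath::parse(&logged).into_owned();
    assert!(original == reparsed);
}
```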
- pub(crate) async fn end_processing(&mut self, path: &Path) -> Result<(), WriteLogError> { - self.write(&format!("{ENTRY_END}{}\n", path.to_string_lossy())) + pub(crate) async fn end_processing( + &mut self, + path: &AssetPath<'_>, + ) -> Result<(), WriteLogError> { + self.write(&format!("{ENTRY_END}{path}\n")) .await .map_err(|e| WriteLogError { - log_entry: LogEntry::EndProcessing(path.to_owned()), + log_entry: LogEntry::EndProcessing(path.clone_owned()), error: e, }) } diff --git a/crates/bevy_asset/src/processor/mod.rs b/crates/bevy_asset/src/processor/mod.rs index 07803ee6ba932..4e5b2a878ab00 100644 --- a/crates/bevy_asset/src/processor/mod.rs +++ b/crates/bevy_asset/src/processor/mod.rs @@ -6,15 +6,15 @@ pub use process::*; use crate::{ io::{ - processor_gated::ProcessorGatedReader, AssetProvider, AssetProviders, AssetReader, - AssetReaderError, AssetSourceEvent, AssetWatcher, AssetWriter, AssetWriterError, + AssetReader, AssetReaderError, AssetSource, AssetSourceBuilders, AssetSourceEvent, + AssetSourceId, AssetSources, AssetWriter, AssetWriterError, MissingAssetSourceError, }, meta::{ get_asset_hash, get_full_asset_hash, AssetAction, AssetActionMinimal, AssetHash, AssetMeta, AssetMetaDyn, AssetMetaMinimal, ProcessedInfo, ProcessedInfoMinimal, }, - AssetLoadError, AssetPath, AssetServer, DeserializeMetaError, - MissingAssetLoaderForExtensionError, CANNOT_WATCH_ERROR_MESSAGE, + AssetLoadError, AssetPath, AssetServer, AssetServerMode, DeserializeMetaError, + MissingAssetLoaderForExtensionError, }; use bevy_ecs::prelude::*; use bevy_log::{debug, error, trace, warn}; @@ -30,10 +30,10 @@ use std::{ }; use thiserror::Error; -/// A "background" asset processor that reads asset values from a source [`AssetProvider`] (which corresponds to an [`AssetReader`] / [`AssetWriter`] pair), -/// processes them in some way, and writes them to a destination [`AssetProvider`]. +/// A "background" asset processor that reads asset values from a source [`AssetSource`] (which corresponds to an [`AssetReader`] / [`AssetWriter`] pair), +/// processes them in some way, and writes them to a destination [`AssetSource`]. /// -/// This will create .meta files (a human-editable serialized form of [`AssetMeta`]) in the source [`AssetProvider`] for assets that +/// This will create .meta files (a human-editable serialized form of [`AssetMeta`]) in the source [`AssetSource`] for assets that /// that can be loaded and/or processed. This enables developers to configure how each asset should be loaded and/or processed. /// /// [`AssetProcessor`] can be run in the background while a Bevy App is running. Changes to assets will be automatically detected and hot-reloaded. @@ -58,37 +58,21 @@ pub struct AssetProcessorData { /// Default processors for file extensions default_processors: RwLock>, state: async_lock::RwLock, - source_reader: Box, - source_writer: Box, - destination_reader: Box, - destination_writer: Box, + sources: AssetSources, initialized_sender: async_broadcast::Sender<()>, initialized_receiver: async_broadcast::Receiver<()>, finished_sender: async_broadcast::Sender<()>, finished_receiver: async_broadcast::Receiver<()>, - source_event_receiver: crossbeam_channel::Receiver, - _source_watcher: Option>, } impl AssetProcessor { /// Creates a new [`AssetProcessor`] instance. 
- pub fn new( - providers: &mut AssetProviders, - source: &AssetProvider, - destination: &AssetProvider, - ) -> Self { - let data = Arc::new(AssetProcessorData::new( - providers.get_source_reader(source), - providers.get_source_writer(source), - providers.get_destination_reader(destination), - providers.get_destination_writer(destination), - )); - let destination_reader = providers.get_destination_reader(destination); + pub fn new(source: &mut AssetSourceBuilders) -> Self { + let data = Arc::new(AssetProcessorData::new(source.build_sources(true, false))); // The asset processor uses its own asset server with its own id space - let server = AssetServer::new( - Box::new(ProcessorGatedReader::new(destination_reader, data.clone())), - true, - ); + let mut sources = source.build_sources(false, false); + sources.gate_on_processor(data.clone()); + let server = AssetServer::new(sources, AssetServerMode::Processed, false); Self { server, data } } @@ -114,24 +98,18 @@ impl AssetProcessor { *self.data.state.read().await } - /// Retrieves the "source" [`AssetReader`] (the place where user-provided unprocessed "asset sources" are stored) - pub fn source_reader(&self) -> &dyn AssetReader { - &*self.data.source_reader - } - - /// Retrieves the "source" [`AssetWriter`] (the place where user-provided unprocessed "asset sources" are stored) - pub fn source_writer(&self) -> &dyn AssetWriter { - &*self.data.source_writer - } - - /// Retrieves the "destination" [`AssetReader`] (the place where processed / [`AssetProcessor`]-managed assets are stored) - pub fn destination_reader(&self) -> &dyn AssetReader { - &*self.data.destination_reader + /// Retrieves the [`AssetSource`] for this processor + #[inline] + pub fn get_source<'a, 'b>( + &'a self, + id: impl Into>, + ) -> Result<&'a AssetSource, MissingAssetSourceError> { + self.data.sources.get(id.into()) } - /// Retrieves the "destination" [`AssetWriter`] (the place where processed / [`AssetProcessor`]-managed assets are stored) - pub fn destination_writer(&self) -> &dyn AssetWriter { - &*self.data.destination_writer + #[inline] + pub fn sources(&self) -> &AssetSources { + &self.data.sources } /// Logs an unrecoverable error. On the next run of the processor, all assets will be regenerated. This should only be used as a last resort. @@ -144,14 +122,14 @@ impl AssetProcessor { /// Logs the start of an asset being processed. If this is not followed at some point in the log by a closing [`AssetProcessor::log_end_processing`], /// in the next run of the processor the asset processing will be considered "incomplete" and it will be reprocessed. - async fn log_begin_processing(&self, path: &Path) { + async fn log_begin_processing(&self, path: &AssetPath<'_>) { let mut log = self.data.log.write().await; let log = log.as_mut().unwrap(); log.begin_processing(path).await.unwrap(); } /// Logs the end of an asset being successfully processed. See [`AssetProcessor::log_begin_processing`]. - async fn log_end_processing(&self, path: &Path) { + async fn log_end_processing(&self, path: &AssetPath<'_>) { let mut log = self.data.log.write().await; let log = log.as_mut().unwrap(); log.end_processing(path).await.unwrap(); @@ -172,10 +150,11 @@ impl AssetProcessor { } /// Processes all assets. This will: + /// * For each "processed [`AssetSource`]: /// * Scan the [`ProcessorTransactionLog`] and recover from any failures detected - /// * Scan the destination [`AssetProvider`] to build the current view of already processed assets. 
- /// * Scan the source [`AssetProvider`] and remove any processed "destination" assets that are invalid or no longer exist. - /// * For each asset in the `source` [`AssetProvider`], kick off a new "process job", which will process the asset + /// * Scan the processed [`AssetReader`] to build the current view of already processed assets. + /// * Scan the unprocessed [`AssetReader`] and remove any final processed assets that are invalid or no longer exist. + /// * For each asset in the unprocessed [`AssetReader`], kick off a new "process job", which will process the asset /// (if the latest version of the asset has not been processed). #[cfg(all(not(target_arch = "wasm32"), feature = "multi-threaded"))] pub fn process_assets(&self) { @@ -184,8 +163,11 @@ impl AssetProcessor { IoTaskPool::get().scope(|scope| { scope.spawn(async move { self.initialize().await.unwrap(); - let path = PathBuf::from(""); - self.process_assets_internal(scope, path).await.unwrap(); + for source in self.sources().iter_processed() { + self.process_assets_internal(scope, source, PathBuf::from("")) + .await + .unwrap(); + } }); }); // This must happen _after_ the scope resolves or it will happen "too early" @@ -195,20 +177,24 @@ impl AssetProcessor { debug!("Processing finished in {:?}", end_time - start_time); } - /// Listens for changes to assets in the source [`AssetProvider`] and update state accordingly. + /// Listens for changes to assets in the source [`AssetSource`] and update state accordingly. // PERF: parallelize change event processing pub async fn listen_for_source_change_events(&self) { debug!("Listening for changes to source assets"); loop { let mut started_processing = false; - for event in self.data.source_event_receiver.try_iter() { - if !started_processing { - self.set_state(ProcessorState::Processing).await; - started_processing = true; - } + for source in self.data.sources.iter_processed() { + if let Some(receiver) = source.event_receiver() { + for event in receiver.try_iter() { + if !started_processing { + self.set_state(ProcessorState::Processing).await; + started_processing = true; + } - self.handle_asset_source_event(event).await; + self.handle_asset_source_event(source, event).await; + } + } } if started_processing { @@ -217,84 +203,91 @@ impl AssetProcessor { } } - async fn handle_asset_source_event(&self, event: AssetSourceEvent) { + async fn handle_asset_source_event(&self, source: &AssetSource, event: AssetSourceEvent) { trace!("{event:?}"); match event { AssetSourceEvent::AddedAsset(path) | AssetSourceEvent::AddedMeta(path) | AssetSourceEvent::ModifiedAsset(path) | AssetSourceEvent::ModifiedMeta(path) => { - self.process_asset(&path).await; + self.process_asset(source, path).await; } AssetSourceEvent::RemovedAsset(path) => { - self.handle_removed_asset(path).await; + self.handle_removed_asset(source, path).await; } AssetSourceEvent::RemovedMeta(path) => { - self.handle_removed_meta(&path).await; + self.handle_removed_meta(source, path).await; } AssetSourceEvent::AddedFolder(path) => { - self.handle_added_folder(path).await; + self.handle_added_folder(source, path).await; } // NOTE: As a heads up for future devs: this event shouldn't be run in parallel with other events that might // touch this folder (ex: the folder might be re-created with new assets). Clean up the old state first. // Currently this event handler is not parallel, but it could be (and likely should be) in the future. 
AssetSourceEvent::RemovedFolder(path) => { - self.handle_removed_folder(&path).await; + self.handle_removed_folder(source, &path).await; } AssetSourceEvent::RenamedAsset { old, new } => { // If there was a rename event, but the path hasn't changed, this asset might need reprocessing. // Sometimes this event is returned when an asset is moved "back" into the asset folder if old == new { - self.process_asset(&new).await; + self.process_asset(source, new).await; } else { - self.handle_renamed_asset(old, new).await; + self.handle_renamed_asset(source, old, new).await; } } AssetSourceEvent::RenamedMeta { old, new } => { // If there was a rename event, but the path hasn't changed, this asset meta might need reprocessing. // Sometimes this event is returned when an asset meta is moved "back" into the asset folder if old == new { - self.process_asset(&new).await; + self.process_asset(source, new).await; } else { debug!("Meta renamed from {old:?} to {new:?}"); let mut infos = self.data.asset_infos.write().await; // Renaming meta should not assume that an asset has also been renamed. Check both old and new assets to see // if they should be re-imported (and/or have new meta generated) - infos.check_reprocess_queue.push_back(old); - infos.check_reprocess_queue.push_back(new); + let new_asset_path = AssetPath::from(new).with_source(source.id()); + let old_asset_path = AssetPath::from(old).with_source(source.id()); + infos.check_reprocess_queue.push_back(old_asset_path); + infos.check_reprocess_queue.push_back(new_asset_path); } } AssetSourceEvent::RenamedFolder { old, new } => { // If there was a rename event, but the path hasn't changed, this asset folder might need reprocessing. // Sometimes this event is returned when an asset meta is moved "back" into the asset folder if old == new { - self.handle_added_folder(new).await; + self.handle_added_folder(source, new).await; } else { // PERF: this reprocesses everything in the moved folder. this is not necessary in most cases, but // requires some nuance when it comes to path handling. 
- self.handle_removed_folder(&old).await; - self.handle_added_folder(new).await; + self.handle_removed_folder(source, &old).await; + self.handle_added_folder(source, new).await; } } AssetSourceEvent::RemovedUnknown { path, is_meta } => { - match self.destination_reader().is_directory(&path).await { + let processed_reader = source.processed_reader().unwrap(); + match processed_reader.is_directory(&path).await { Ok(is_directory) => { if is_directory { - self.handle_removed_folder(&path).await; + self.handle_removed_folder(source, &path).await; } else if is_meta { - self.handle_removed_meta(&path).await; + self.handle_removed_meta(source, path).await; } else { - self.handle_removed_asset(path).await; + self.handle_removed_asset(source, path).await; } } Err(err) => { - if let AssetReaderError::NotFound(_) = err { - // if the path is not found, a processed version does not exist - } else { - error!( - "Path '{path:?}' as removed, but the destination reader could not determine if it \ - was a folder or a file due to the following error: {err}" - ); + match err { + AssetReaderError::NotFound(_) => { + // if the path is not found, a processed version does not exist + } + AssetReaderError::Io(err) => { + error!( + "Path '{}' was removed, but the destination reader could not determine if it \ + was a folder or a file due to the following error: {err}", + AssetPath::from_path(&path).with_source(source.id()) + ); + } } } } @@ -302,38 +295,44 @@ impl AssetProcessor { } } - async fn handle_added_folder(&self, path: PathBuf) { - debug!("Folder {:?} was added. Attempting to re-process", path); + async fn handle_added_folder(&self, source: &AssetSource, path: PathBuf) { + debug!( + "Folder {} was added. Attempting to re-process", + AssetPath::from_path(&path).with_source(source.id()) + ); #[cfg(any(target_arch = "wasm32", not(feature = "multi-threaded")))] error!("AddFolder event cannot be handled in single threaded mode (or WASM) yet."); #[cfg(all(not(target_arch = "wasm32"), feature = "multi-threaded"))] IoTaskPool::get().scope(|scope| { scope.spawn(async move { - self.process_assets_internal(scope, path).await.unwrap(); + self.process_assets_internal(scope, source, path) + .await + .unwrap(); }); }); } /// Responds to a removed meta event by reprocessing the asset at the given path. - async fn handle_removed_meta(&self, path: &Path) { + async fn handle_removed_meta(&self, source: &AssetSource, path: PathBuf) { // If meta was removed, we might need to regenerate it. // Likewise, the user might be manually re-adding the asset. // Therefore, we shouldn't automatically delete the asset ... that is a // user-initiated action. debug!( "Meta for asset {:?} was removed. Attempting to re-process", - path + AssetPath::from_path(&path).with_source(source.id()) ); - self.process_asset(path).await; + self.process_asset(source, path).await; } /// Removes all processed assets stored at the given path (respecting transactionality), then removes the folder itself. 
- async fn handle_removed_folder(&self, path: &Path) { + async fn handle_removed_folder(&self, source: &AssetSource, path: &Path) { debug!("Removing folder {:?} because source was removed", path); - match self.destination_reader().read_directory(path).await { + let processed_reader = source.processed_reader().unwrap(); + match processed_reader.read_directory(path).await { Ok(mut path_stream) => { while let Some(child_path) = path_stream.next().await { - self.handle_removed_asset(child_path).await; + self.handle_removed_asset(source, child_path).await; } } Err(err) => match err { @@ -349,28 +348,32 @@ impl AssetProcessor { } }, } - if let Err(AssetWriterError::Io(err)) = - self.destination_writer().remove_directory(path).await - { - // we can ignore NotFound because if the "final" file in a folder was removed - // then we automatically clean up this folder - if err.kind() != ErrorKind::NotFound { - error!("Failed to remove destination folder that no longer exists in asset source {path:?}: {err}"); + let processed_writer = source.processed_writer().unwrap(); + if let Err(err) = processed_writer.remove_directory(path).await { + match err { + AssetWriterError::Io(err) => { + // we can ignore NotFound because if the "final" file in a folder was removed + // then we automatically clean up this folder + if err.kind() != ErrorKind::NotFound { + let asset_path = AssetPath::from_path(path).with_source(source.id()); + error!("Failed to remove destination folder that no longer exists in {asset_path}: {err}"); + } + } } } } /// Removes the processed version of an asset and associated in-memory metadata. This will block until all existing reads/writes to the /// asset have finished, thanks to the `file_transaction_lock`. - async fn handle_removed_asset(&self, path: PathBuf) { - debug!("Removing processed {:?} because source was removed", path); - let asset_path = AssetPath::from_path(path); + async fn handle_removed_asset(&self, source: &AssetSource, path: PathBuf) { + let asset_path = AssetPath::from(path).with_source(source.id()); + debug!("Removing processed {asset_path} because source was removed"); let mut infos = self.data.asset_infos.write().await; if let Some(info) = infos.get(&asset_path) { // we must wait for uncontested write access to the asset source to ensure existing readers / writers // can finish their operations let _write_lock = info.file_transaction_lock.write(); - self.remove_processed_asset_and_meta(asset_path.path()) + self.remove_processed_asset_and_meta(source, asset_path.path()) .await; } infos.remove(&asset_path).await; @@ -378,22 +381,25 @@ impl AssetProcessor { /// Handles a renamed source asset by moving it's processed results to the new location and updating in-memory paths + metadata. /// This will cause direct path dependencies to break. 
- async fn handle_renamed_asset(&self, old: PathBuf, new: PathBuf) { + async fn handle_renamed_asset(&self, source: &AssetSource, old: PathBuf, new: PathBuf) { let mut infos = self.data.asset_infos.write().await; - let old_asset_path = AssetPath::from_path(old); - if let Some(info) = infos.get(&old_asset_path) { + let old = AssetPath::from(old).with_source(source.id()); + let new = AssetPath::from(new).with_source(source.id()); + let processed_writer = source.processed_writer().unwrap(); + if let Some(info) = infos.get(&old) { // we must wait for uncontested write access to the asset source to ensure existing readers / writers // can finish their operations let _write_lock = info.file_transaction_lock.write(); - let old = old_asset_path.path(); - self.destination_writer().rename(old, &new).await.unwrap(); - self.destination_writer() - .rename_meta(old, &new) + processed_writer + .rename(old.path(), new.path()) + .await + .unwrap(); + processed_writer + .rename_meta(old.path(), new.path()) .await .unwrap(); } - let new_asset_path = AssetPath::from_path(new); - infos.rename(&old_asset_path, &new_asset_path).await; + infos.rename(&old, &new).await; } async fn finish_processing_assets(&self) { @@ -408,19 +414,20 @@ impl AssetProcessor { fn process_assets_internal<'scope>( &'scope self, scope: &'scope bevy_tasks::Scope<'scope, '_, ()>, + source: &'scope AssetSource, path: PathBuf, ) -> bevy_utils::BoxedFuture<'scope, Result<(), AssetReaderError>> { Box::pin(async move { - if self.source_reader().is_directory(&path).await? { - let mut path_stream = self.source_reader().read_directory(&path).await?; + if source.reader().is_directory(&path).await? { + let mut path_stream = source.reader().read_directory(&path).await?; while let Some(path) = path_stream.next().await { - self.process_assets_internal(scope, path).await?; + self.process_assets_internal(scope, source, path).await?; } } else { // Files without extensions are skipped let processor = self.clone(); scope.spawn(async move { - processor.process_asset(&path).await; + processor.process_asset(source, path).await; }); } Ok(()) @@ -434,8 +441,9 @@ impl AssetProcessor { IoTaskPool::get().scope(|scope| { for path in check_reprocess_queue.drain(..) { let processor = self.clone(); + let source = self.get_source(path.source()).unwrap(); scope.spawn(async move { - processor.process_asset(&path).await; + processor.process_asset(source, path.into()).await; }); } }); @@ -471,7 +479,7 @@ impl AssetProcessor { processors.get(processor_type_name).cloned() } - /// Populates the initial view of each asset by scanning the source and destination folders. + /// Populates the initial view of each asset by scanning the unprocessed and processed asset folders. /// This info will later be used to determine whether or not to re-process an asset /// /// This will validate transactions and recover failed transactions when necessary. 
@@ -512,68 +520,81 @@ impl AssetProcessor { }) } - let mut source_paths = Vec::new(); - let source_reader = self.source_reader(); - get_asset_paths(source_reader, None, PathBuf::from(""), &mut source_paths) + for source in self.sources().iter_processed() { + let Ok(processed_reader) = source.processed_reader() else { + continue; + }; + let Ok(processed_writer) = source.processed_writer() else { + continue; + }; + let mut unprocessed_paths = Vec::new(); + get_asset_paths( + source.reader(), + None, + PathBuf::from(""), + &mut unprocessed_paths, + ) .await .map_err(InitializeError::FailedToReadSourcePaths)?; - let mut destination_paths = Vec::new(); - let destination_reader = self.destination_reader(); - let destination_writer = self.destination_writer(); - get_asset_paths( - destination_reader, - Some(destination_writer), - PathBuf::from(""), - &mut destination_paths, - ) - .await - .map_err(InitializeError::FailedToReadDestinationPaths)?; - - for path in &source_paths { - asset_infos.get_or_insert(AssetPath::from_path(path.clone())); - } + let mut processed_paths = Vec::new(); + get_asset_paths( + processed_reader, + Some(processed_writer), + PathBuf::from(""), + &mut processed_paths, + ) + .await + .map_err(InitializeError::FailedToReadDestinationPaths)?; - for path in &destination_paths { - let asset_path = AssetPath::from_path(path.clone()); - let mut dependencies = Vec::new(); - if let Some(info) = asset_infos.get_mut(&asset_path) { - match self.destination_reader().read_meta_bytes(path).await { - Ok(meta_bytes) => { - match ron::de::from_bytes::(&meta_bytes) { - Ok(minimal) => { - trace!( - "Populated processed info for asset {path:?} {:?}", - minimal.processed_info - ); + for path in unprocessed_paths { + asset_infos.get_or_insert(AssetPath::from(path).with_source(source.id())); + } - if let Some(processed_info) = &minimal.processed_info { - for process_dependency_info in - &processed_info.process_dependencies - { - dependencies.push(process_dependency_info.path.clone()); + for path in processed_paths { + let mut dependencies = Vec::new(); + let asset_path = AssetPath::from(path).with_source(source.id()); + if let Some(info) = asset_infos.get_mut(&asset_path) { + match processed_reader.read_meta_bytes(asset_path.path()).await { + Ok(meta_bytes) => { + match ron::de::from_bytes::(&meta_bytes) { + Ok(minimal) => { + trace!( + "Populated processed info for asset {asset_path} {:?}", + minimal.processed_info + ); + + if let Some(processed_info) = &minimal.processed_info { + for process_dependency_info in + &processed_info.process_dependencies + { + dependencies.push(process_dependency_info.path.clone()); + } } + info.processed_info = minimal.processed_info; + } + Err(err) => { + trace!("Removing processed data for {asset_path} because meta could not be parsed: {err}"); + self.remove_processed_asset_and_meta(source, asset_path.path()) + .await; } - info.processed_info = minimal.processed_info; - } - Err(err) => { - trace!("Removing processed data for {path:?} because meta could not be parsed: {err}"); - self.remove_processed_asset_and_meta(path).await; } } + Err(err) => { + trace!("Removing processed data for {asset_path} because meta failed to load: {err}"); + self.remove_processed_asset_and_meta(source, asset_path.path()) + .await; + } } - Err(err) => { - trace!("Removing processed data for {path:?} because meta failed to load: {err}"); - self.remove_processed_asset_and_meta(path).await; - } + } else { + trace!("Removing processed data for non-existent asset {asset_path}"); + 
self.remove_processed_asset_and_meta(source, asset_path.path()) + .await; } - } else { - trace!("Removing processed data for non-existent asset {path:?}"); - self.remove_processed_asset_and_meta(path).await; - } - for dependency in dependencies { - asset_infos.add_dependant(&dependency, asset_path.clone()); + for dependency in dependencies { + asset_infos.add_dependant(&dependency, asset_path.clone()); + } } } @@ -584,19 +605,20 @@ impl AssetProcessor { /// Removes the processed version of an asset and its metadata, if it exists. This _is not_ transactional like `remove_processed_asset_transactional`, nor /// does it remove existing in-memory metadata. - async fn remove_processed_asset_and_meta(&self, path: &Path) { - if let Err(err) = self.destination_writer().remove(path).await { + async fn remove_processed_asset_and_meta(&self, source: &AssetSource, path: &Path) { + if let Err(err) = source.processed_writer().unwrap().remove(path).await { warn!("Failed to remove non-existent asset {path:?}: {err}"); } - if let Err(err) = self.destination_writer().remove_meta(path).await { + if let Err(err) = source.processed_writer().unwrap().remove_meta(path).await { warn!("Failed to remove non-existent meta {path:?}: {err}"); } - self.clean_empty_processed_ancestor_folders(path).await; + self.clean_empty_processed_ancestor_folders(source, path) + .await; } - async fn clean_empty_processed_ancestor_folders(&self, path: &Path) { + async fn clean_empty_processed_ancestor_folders(&self, source: &AssetSource, path: &Path) { // As a safety precaution don't delete absolute paths to avoid deleting folders outside of the destination folder if path.is_absolute() { error!("Attempted to clean up ancestor folders of an absolute path. This is unsafe so the operation was skipped."); @@ -606,8 +628,9 @@ impl AssetProcessor { if parent == Path::new("") { break; } - if self - .destination_writer() + if source + .processed_writer() + .unwrap() .remove_empty_directory(parent) .await .is_err() @@ -624,33 +647,39 @@ impl AssetProcessor { /// to block reads until the asset is processed). /// /// [`LoadContext`]: crate::loader::LoadContext - async fn process_asset(&self, path: &Path) { - let result = self.process_asset_internal(path).await; + /// [`ProcessorGatedReader`]: crate::io::processor_gated::ProcessorGatedReader + async fn process_asset(&self, source: &AssetSource, path: PathBuf) { + let asset_path = AssetPath::from(path).with_source(source.id()); + let result = self.process_asset_internal(source, &asset_path).await; let mut infos = self.data.asset_infos.write().await; - let asset_path = AssetPath::from_path(path.to_owned()); infos.finish_processing(asset_path, result).await; } - async fn process_asset_internal(&self, path: &Path) -> Result { - if path.extension().is_none() { - return Err(ProcessError::ExtensionRequired); - } - let asset_path = AssetPath::from_path(path.to_path_buf()); + async fn process_asset_internal( + &self, + source: &AssetSource, + asset_path: &AssetPath<'static>, + ) -> Result { + // TODO: The extension check was removed now tht AssetPath is the input. is that ok? 
// TODO: check if already processing to protect against duplicate hot-reload events - debug!("Processing {:?}", path); + debug!("Processing {:?}", asset_path); let server = &self.server; + let path = asset_path.path(); + let reader = source.reader(); + + let reader_err = |err| ProcessError::AssetReaderError { + path: asset_path.clone(), + err, + }; + let writer_err = |err| ProcessError::AssetWriterError { + path: asset_path.clone(), + err, + }; // Note: we get the asset source reader first because we don't want to create meta files for assets that don't have source files - let mut reader = self.source_reader().read(path).await.map_err(|e| match e { - AssetReaderError::NotFound(_) => ProcessError::MissingAssetSource(path.to_owned()), - AssetReaderError::Io(err) => ProcessError::AssetSourceIoError(err), - })?; - - let (mut source_meta, meta_bytes, processor) = match self - .source_reader() - .read_meta_bytes(path) - .await - { + let mut byte_reader = reader.read(path).await.map_err(reader_err)?; + + let (mut source_meta, meta_bytes, processor) = match reader.read_meta_bytes(path).await { Ok(meta_bytes) => { let minimal: AssetMetaMinimal = ron::de::from_bytes(&meta_bytes).map_err(|e| { ProcessError::DeserializeMetaError(DeserializeMetaError::DeserializeMinimal(e)) @@ -684,7 +713,7 @@ impl AssetProcessor { let meta = processor.default_meta(); (meta, Some(processor)) } else { - match server.get_path_asset_loader(&asset_path).await { + match server.get_path_asset_loader(asset_path.clone()).await { Ok(loader) => (loader.default_meta(), None), Err(MissingAssetLoaderForExtensionError { .. }) => { let meta: Box = @@ -695,19 +724,31 @@ impl AssetProcessor { }; let meta_bytes = meta.serialize(); // write meta to source location if it doesn't already exist - self.source_writer() + source + .writer()? .write_meta_bytes(path, &meta_bytes) - .await?; + .await + .map_err(writer_err)?; (meta, meta_bytes, processor) } - Err(err) => return Err(ProcessError::ReadAssetMetaError(err)), + Err(err) => { + return Err(ProcessError::ReadAssetMetaError { + path: asset_path.clone(), + err, + }) + } }; + let processed_writer = source.processed_writer()?; + let mut asset_bytes = Vec::new(); - reader + byte_reader .read_to_end(&mut asset_bytes) .await - .map_err(ProcessError::AssetSourceIoError)?; + .map_err(|e| ProcessError::AssetReaderError { + path: asset_path.clone(), + err: AssetReaderError::Io(e), + })?; // PERF: in theory these hashes could be streamed if we want to avoid allocating the whole asset. // The downside is that reading assets would need to happen twice (once for the hash and once for the asset loader) @@ -722,7 +763,7 @@ impl AssetProcessor { { let infos = self.data.asset_infos.read().await; if let Some(current_processed_info) = infos - .get(&asset_path) + .get(asset_path) .and_then(|i| i.processed_info.as_ref()) { if current_processed_info.hash == new_hash { @@ -754,18 +795,24 @@ impl AssetProcessor { // NOTE: if processing the asset fails this will produce an "unfinished" log entry, forcing a rebuild on next run. 
// Directly writing to the asset destination in the processor necessitates this behavior // TODO: this class of failure can be recovered via re-processing + smarter log validation that allows for duplicate transactions in the event of failures - self.log_begin_processing(path).await; + self.log_begin_processing(asset_path).await; if let Some(processor) = processor { - let mut writer = self.destination_writer().write(path).await?; + let mut writer = processed_writer.write(path).await.map_err(writer_err)?; let mut processed_meta = { let mut context = - ProcessContext::new(self, &asset_path, &asset_bytes, &mut new_processed_info); + ProcessContext::new(self, asset_path, &asset_bytes, &mut new_processed_info); processor .process(&mut context, source_meta, &mut *writer) .await? }; - writer.flush().await.map_err(AssetWriterError::Io)?; + writer + .flush() + .await + .map_err(|e| ProcessError::AssetWriterError { + path: asset_path.clone(), + err: AssetWriterError::Io(e), + })?; let full_hash = get_full_asset_hash( new_hash, @@ -777,20 +824,23 @@ impl AssetProcessor { new_processed_info.full_hash = full_hash; *processed_meta.processed_info_mut() = Some(new_processed_info.clone()); let meta_bytes = processed_meta.serialize(); - self.destination_writer() + processed_writer .write_meta_bytes(path, &meta_bytes) - .await?; + .await + .map_err(writer_err)?; } else { - self.destination_writer() + processed_writer .write_bytes(path, &asset_bytes) - .await?; + .await + .map_err(writer_err)?; *source_meta.processed_info_mut() = Some(new_processed_info.clone()); let meta_bytes = source_meta.serialize(); - self.destination_writer() + processed_writer .write_meta_bytes(path, &meta_bytes) - .await?; + .await + .map_err(writer_err)?; } - self.log_end_processing(path).await; + self.log_end_processing(asset_path).await; Ok(ProcessResult::Processed(new_processed_info)) } @@ -818,27 +868,35 @@ impl AssetProcessor { } LogEntryError::UnfinishedTransaction(path) => { debug!("Asset {path:?} did not finish processing. Clearing state for that asset"); - if let Err(err) = self.destination_writer().remove(&path).await { + let mut unrecoverable_err = |message: &dyn std::fmt::Display| { + error!("Failed to remove asset {path:?}: {message}"); + state_is_valid = false; + }; + let Ok(source) = self.get_source(path.source()) else { + (unrecoverable_err)(&"AssetSource does not exist"); + continue; + }; + let Ok(processed_writer) = source.processed_writer() else { + (unrecoverable_err)(&"AssetSource does not have a processed AssetWriter registered"); + continue; + }; + + if let Err(err) = processed_writer.remove(path.path()).await { match err { AssetWriterError::Io(err) => { // any error but NotFound means we could be in a bad state if err.kind() != ErrorKind::NotFound { - error!("Failed to remove asset {path:?}: {err}"); - state_is_valid = false; + (unrecoverable_err)(&err); } } } } - if let Err(err) = self.destination_writer().remove_meta(&path).await - { + if let Err(err) = processed_writer.remove_meta(path.path()).await { match err { AssetWriterError::Io(err) => { // any error but NotFound means we could be in a bad state if err.kind() != ErrorKind::NotFound { - error!( - "Failed to remove asset meta {path:?}: {err}" - ); - state_is_valid = false; + (unrecoverable_err)(&err); } } } @@ -852,12 +910,16 @@ impl AssetProcessor { if !state_is_valid { error!("Processed asset transaction log state was invalid and unrecoverable for some reason (see previous logs). 
Removing processed assets and starting fresh."); - if let Err(err) = self - .destination_writer() - .remove_assets_in_directory(Path::new("")) - .await - { - panic!("Processed assets were in a bad state. To correct this, the asset processor attempted to remove all processed assets and start from scratch. This failed. There is no way to continue. Try restarting, or deleting imported asset folder manually. {err}"); + for source in self.sources().iter_processed() { + let Ok(processed_writer) = source.processed_writer() else { + continue; + }; + if let Err(err) = processed_writer + .remove_assets_in_directory(Path::new("")) + .await + { + panic!("Processed assets were in a bad state. To correct this, the asset processor attempted to remove all processed assets and start from scratch. This failed. There is no way to continue. Try restarting, or deleting imported asset folder manually. {err}"); + } } } } @@ -870,35 +932,20 @@ impl AssetProcessor { } impl AssetProcessorData { - pub fn new( - source_reader: Box, - source_writer: Box, - destination_reader: Box, - destination_writer: Box, - ) -> Self { + pub fn new(source: AssetSources) -> Self { let (mut finished_sender, finished_receiver) = async_broadcast::broadcast(1); let (mut initialized_sender, initialized_receiver) = async_broadcast::broadcast(1); // allow overflow on these "one slot" channels to allow receivers to retrieve the "latest" state, and to allow senders to // not block if there was older state present. finished_sender.set_overflow(true); initialized_sender.set_overflow(true); - let (source_event_sender, source_event_receiver) = crossbeam_channel::unbounded(); - // TODO: watching for changes could probably be entirely optional / we could just warn here - let source_watcher = source_reader.watch_for_changes(source_event_sender); - if source_watcher.is_none() { - error!("{}", CANNOT_WATCH_ERROR_MESSAGE); - } + AssetProcessorData { - source_reader, - source_writer, - destination_reader, - destination_writer, + sources: source, finished_sender, finished_receiver, initialized_sender, initialized_receiver, - source_event_receiver, - _source_watcher: source_watcher, state: async_lock::RwLock::new(ProcessorState::Initializing), log: Default::default(), processors: Default::default(), @@ -908,11 +955,11 @@ impl AssetProcessorData { } /// Returns a future that will not finish until the path has been processed. - pub async fn wait_until_processed(&self, path: &Path) -> ProcessStatus { + pub async fn wait_until_processed(&self, path: AssetPath<'static>) -> ProcessStatus { self.wait_until_initialized().await; let mut receiver = { let infos = self.asset_infos.write().await; - let info = infos.get(&AssetPath::from_path(path.to_path_buf())); + let info = infos.get(&path); match info { Some(info) => match info.status { Some(result) => return result, @@ -1038,7 +1085,7 @@ pub struct ProcessorAssetInfos { /// Therefore this _must_ always be consistent with the `infos` data. If a new asset is added to `infos`, it should /// check this maps for dependencies and add them. If an asset is removed, it should update the dependants here. 
     non_existent_dependants: HashMap<AssetPath<'static>, HashSet<AssetPath<'static>>>,
-    check_reprocess_queue: VecDeque<PathBuf>,
+    check_reprocess_queue: VecDeque<AssetPath<'static>>,
 }

 impl ProcessorAssetInfos {
@@ -1100,7 +1147,7 @@ impl ProcessorAssetInfos {
                 info.update_status(ProcessStatus::Processed).await;
                 let dependants = info.dependants.iter().cloned().collect::<Vec<_>>();
                 for path in dependants {
-                    self.check_reprocess_queue.push_back(path.path().to_owned());
+                    self.check_reprocess_queue.push_back(path);
                 }
             }
             Ok(ProcessResult::SkippedNotChanged) => {
@@ -1118,20 +1165,21 @@ impl ProcessorAssetInfos {
                 // Skip assets without extensions
             }
             Err(ProcessError::MissingAssetLoaderForExtension(_)) => {
-                trace!("No loader found for {:?}", asset_path);
+                trace!("No loader found for {asset_path}");
             }
-            Err(ProcessError::MissingAssetSource(_)) => {
+            Err(ProcessError::AssetReaderError {
+                err: AssetReaderError::NotFound(_),
+                ..
+            }) => {
                 // if there is no asset source, no processing can be done
-                trace!(
-                    "No need to process asset {:?} because it does not exist",
-                    asset_path
-                );
+                trace!("No need to process asset {asset_path} because it does not exist");
             }
             Err(err) => {
-                error!("Failed to process asset {:?}: {:?}", asset_path, err);
+                error!("Failed to process asset {asset_path}: {err}");
                 // if this failed because a dependency could not be loaded, make sure it is reprocessed if that dependency is reprocessed
-                if let ProcessError::AssetLoadError(AssetLoadError::CannotLoadDependency {
+                if let ProcessError::AssetLoadError(AssetLoadError::AssetLoaderError {
                     path: dependency,
+                    ..
                 }) = err
                 {
                     let info = self.get_mut(&asset_path).expect("info should exist");
@@ -1220,10 +1268,10 @@ impl ProcessorAssetInfos {
                 new_info.dependants.iter().cloned().collect()
             };
             // Queue the asset for a reprocess check, in case it needs new meta.
-            self.check_reprocess_queue.push_back(new.path().to_owned());
+            self.check_reprocess_queue.push_back(new.clone());
             for dependant in dependants {
                 // Queue dependants for reprocessing because they might have been waiting for this asset.
-                self.check_reprocess_queue.push_back(dependant.into());
+                self.check_reprocess_queue.push_back(dependant);
             }
         }
     }
diff --git a/crates/bevy_asset/src/processor/process.rs b/crates/bevy_asset/src/processor/process.rs
index 0d0d3f468e2fb..ef6a3fbb2f5c5 100644
--- a/crates/bevy_asset/src/processor/process.rs
+++ b/crates/bevy_asset/src/processor/process.rs
@@ -1,5 +1,8 @@
 use crate::{
-    io::{AssetReaderError, AssetWriterError, Writer},
+    io::{
+        AssetReaderError, AssetWriterError, MissingAssetWriterError,
+        MissingProcessedAssetReaderError, MissingProcessedAssetWriterError, Writer,
+    },
     meta::{AssetAction, AssetMeta, AssetMetaDyn, ProcessDependencyInfo, ProcessedInfo, Settings},
     processor::AssetProcessor,
     saver::{AssetSaver, SavedAsset},
@@ -8,7 +11,7 @@ use crate::{
 };
 use bevy_utils::BoxedFuture;
 use serde::{Deserialize, Serialize};
-use std::{marker::PhantomData, path::PathBuf};
+use std::marker::PhantomData;
 use thiserror::Error;

 /// Asset "processor" logic that reads input asset bytes (stored on [`ProcessContext`]), processes the value in some way,
@@ -70,20 +73,33 @@ pub struct LoadAndSaveSettings {

 /// An error that is encountered during [`Process::process`].
#[derive(Error, Debug)] pub enum ProcessError { - #[error("The asset source file for '{0}' does not exist")] - MissingAssetSource(PathBuf), - #[error(transparent)] - AssetSourceIoError(std::io::Error), #[error(transparent)] MissingAssetLoaderForExtension(#[from] MissingAssetLoaderForExtensionError), #[error(transparent)] MissingAssetLoaderForTypeName(#[from] MissingAssetLoaderForTypeNameError), #[error("The processor '{0}' does not exist")] MissingProcessor(String), + #[error("Encountered an AssetReader error for '{path}': {err}")] + AssetReaderError { + path: AssetPath<'static>, + err: AssetReaderError, + }, + #[error("Encountered an AssetWriter error for '{path}': {err}")] + AssetWriterError { + path: AssetPath<'static>, + err: AssetWriterError, + }, + #[error(transparent)] + MissingAssetWriterError(#[from] MissingAssetWriterError), + #[error(transparent)] + MissingProcessedAssetReaderError(#[from] MissingProcessedAssetReaderError), #[error(transparent)] - AssetWriterError(#[from] AssetWriterError), - #[error("Failed to read asset metadata {0:?}")] - ReadAssetMetaError(AssetReaderError), + MissingProcessedAssetWriterError(#[from] MissingProcessedAssetWriterError), + #[error("Failed to read asset metadata for {path}: {err}")] + ReadAssetMetaError { + path: AssetPath<'static>, + err: AssetReaderError, + }, #[error(transparent)] DeserializeMetaError(#[from] DeserializeMetaError), #[error(transparent)] diff --git a/crates/bevy_asset/src/server/mod.rs b/crates/bevy_asset/src/server/mod.rs index 121633cfade16..16d087331b3c2 100644 --- a/crates/bevy_asset/src/server/mod.rs +++ b/crates/bevy_asset/src/server/mod.rs @@ -2,7 +2,10 @@ mod info; use crate::{ folder::LoadedFolder, - io::{AssetReader, AssetReaderError, AssetSourceEvent, AssetWatcher, Reader}, + io::{ + AssetReader, AssetReaderError, AssetSource, AssetSourceEvent, AssetSourceId, AssetSources, + MissingAssetSourceError, MissingProcessedAssetReaderError, Reader, + }, loader::{AssetLoader, ErasedAssetLoader, LoadContext, LoadedAsset}, meta::{ loader_settings_meta_transform, AssetActionMinimal, AssetMetaDyn, AssetMetaMinimal, @@ -48,52 +51,53 @@ pub(crate) struct AssetServerData { pub(crate) loaders: Arc>, asset_event_sender: Sender, asset_event_receiver: Receiver, - source_event_receiver: Receiver, - reader: Box, - _watcher: Option>, + sources: AssetSources, + mode: AssetServerMode, +} + +/// The "asset mode" the server is currently in. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum AssetServerMode { + /// This server loads unprocessed assets. + Unprocessed, + /// This server loads processed assets. + Processed, } impl AssetServer { /// Create a new instance of [`AssetServer`]. If `watch_for_changes` is true, the [`AssetReader`] storage will watch for changes to /// asset sources and hot-reload them. 
- pub fn new(reader: Box, watch_for_changes: bool) -> Self { - Self::new_with_loaders(reader, Default::default(), watch_for_changes) + pub fn new(sources: AssetSources, mode: AssetServerMode, watching_for_changes: bool) -> Self { + Self::new_with_loaders(sources, Default::default(), mode, watching_for_changes) } pub(crate) fn new_with_loaders( - reader: Box, + sources: AssetSources, loaders: Arc>, - watch_for_changes: bool, + mode: AssetServerMode, + watching_for_changes: bool, ) -> Self { let (asset_event_sender, asset_event_receiver) = crossbeam_channel::unbounded(); - let (source_event_sender, source_event_receiver) = crossbeam_channel::unbounded(); let mut infos = AssetInfos::default(); - let watcher = if watch_for_changes { - infos.watching_for_changes = true; - let watcher = reader.watch_for_changes(source_event_sender); - if watcher.is_none() { - error!("{}", CANNOT_WATCH_ERROR_MESSAGE); - } - watcher - } else { - None - }; + infos.watching_for_changes = watching_for_changes; Self { data: Arc::new(AssetServerData { - reader, - _watcher: watcher, + sources, + mode, asset_event_sender, asset_event_receiver, - source_event_receiver, loaders, infos: RwLock::new(infos), }), } } - /// Returns the primary [`AssetReader`]. - pub fn reader(&self) -> &dyn AssetReader { - &*self.data.reader + /// Retrieves the [`AssetReader`] for the given `source`. + pub fn get_source<'a>( + &'a self, + source: impl Into>, + ) -> Result<&'a AssetSource, MissingAssetSourceError> { + self.data.sources.get(source.into()) } /// Registers a new [`AssetLoader`]. [`AssetLoader`]s must be registered before they can be used. @@ -450,28 +454,30 @@ impl AssetServer { /// contain handles to all assets in the folder. You can wait for all assets to load by checking the [`LoadedFolder`]'s /// [`RecursiveDependencyLoadState`]. #[must_use = "not using the returned strong handle may result in the unexpected release of the assets"] - pub fn load_folder(&self, path: impl AsRef) -> Handle { + pub fn load_folder<'a>(&self, path: impl Into>) -> Handle { let handle = { let mut infos = self.data.infos.write(); infos.create_loading_handle::() }; let id = handle.id().untyped(); + let path = path.into().into_owned(); fn load_folder<'a>( path: &'a Path, + reader: &'a dyn AssetReader, server: &'a AssetServer, handles: &'a mut Vec, ) -> bevy_utils::BoxedFuture<'a, Result<(), AssetLoadError>> { Box::pin(async move { - let is_dir = server.reader().is_directory(path).await?; + let is_dir = reader.is_directory(path).await?; if is_dir { - let mut path_stream = server.reader().read_directory(path.as_ref()).await?; + let mut path_stream = reader.read_directory(path.as_ref()).await?; while let Some(child_path) = path_stream.next().await { - if server.reader().is_directory(&child_path).await? { - load_folder(&child_path, server, handles).await?; + if reader.is_directory(&child_path).await? { + load_folder(&child_path, reader, server, handles).await?; } else { let path = child_path.to_str().expect("Path should be a valid string."); - match server.load_untyped_async(AssetPath::new(path)).await { + match server.load_untyped_async(AssetPath::parse(path)).await { Ok(handle) => handles.push(handle), // skip assets that cannot be loaded Err( @@ -488,11 +494,32 @@ impl AssetServer { } let server = self.clone(); - let owned_path = path.as_ref().to_owned(); IoTaskPool::get() .spawn(async move { + let Ok(source) = server.get_source(path.source()) else { + error!( + "Failed to load {path}. 
AssetSource {:?} does not exist", + path.source() + ); + return; + }; + + let asset_reader = match server.data.mode { + AssetServerMode::Unprocessed { .. } => source.reader(), + AssetServerMode::Processed { .. } => match source.processed_reader() { + Ok(reader) => reader, + Err(_) => { + error!( + "Failed to load {path}. AssetSource {:?} does not have a processed AssetReader", + path.source() + ); + return; + } + }, + }; + let mut handles = Vec::new(); - match load_folder(&owned_path, &server, &mut handles).await { + match load_folder(path.path(), asset_reader, &server, &mut handles).await { Ok(_) => server.send_asset_event(InternalAssetEvent::Loaded { id, loaded_asset: LoadedAsset::new_with_dependencies( @@ -586,6 +613,11 @@ impl AssetServer { Some(info.path.as_ref()?.clone()) } + /// Returns the [`AssetServerMode`] this server is currently in. + pub fn mode(&self) -> AssetServerMode { + self.data.mode + } + /// Pre-register a loader that will later be added. /// /// Assets loaded with matching extensions will be blocked until the @@ -641,34 +673,43 @@ impl AssetServer { ), AssetLoadError, > { + let source = self.get_source(asset_path.source())?; // NOTE: We grab the asset byte reader first to ensure this is transactional for AssetReaders like ProcessorGatedReader // The asset byte reader will "lock" the processed asset, preventing writes for the duration of the lock. // Then the meta reader, if meta exists, will correspond to the meta for the current "version" of the asset. // See ProcessedAssetInfo::file_transaction_lock for more context - let reader = self.data.reader.read(asset_path.path()).await?; - match self.data.reader.read_meta_bytes(asset_path.path()).await { + let asset_reader = match self.data.mode { + AssetServerMode::Unprocessed { .. } => source.reader(), + AssetServerMode::Processed { .. } => source.processed_reader()?, + }; + let reader = asset_reader.read(asset_path.path()).await?; + match asset_reader.read_meta_bytes(asset_path.path()).await { Ok(meta_bytes) => { // TODO: this isn't fully minimal yet. we only need the loader let minimal: AssetMetaMinimal = ron::de::from_bytes(&meta_bytes).map_err(|e| { - AssetLoadError::DeserializeMeta(DeserializeMetaError::DeserializeMinimal(e)) + AssetLoadError::DeserializeMeta { + path: asset_path.clone_owned(), + error: Box::new(DeserializeMetaError::DeserializeMinimal(e)), + } })?; let loader_name = match minimal.asset { AssetActionMinimal::Load { loader } => loader, AssetActionMinimal::Process { .. 
} => { return Err(AssetLoadError::CannotLoadProcessedAsset { - path: asset_path.clone().into_owned(), + path: asset_path.clone_owned(), }) } AssetActionMinimal::Ignore => { return Err(AssetLoadError::CannotLoadIgnoredAsset { - path: asset_path.clone().into_owned(), + path: asset_path.clone_owned(), }) } }; let loader = self.get_asset_loader_with_type_name(&loader_name).await?; - let meta = loader.deserialize_meta(&meta_bytes).map_err(|_| { - AssetLoadError::CannotLoadDependency { - path: asset_path.clone().into_owned(), + let meta = loader.deserialize_meta(&meta_bytes).map_err(|e| { + AssetLoadError::DeserializeMeta { + path: asset_path.clone_owned(), + error: Box::new(e), } })?; @@ -693,13 +734,16 @@ impl AssetServer { populate_hashes: bool, ) -> Result { // TODO: experiment with this - let asset_path = asset_path.clone().into_owned(); + let asset_path = asset_path.clone_owned(); let load_context = LoadContext::new(self, asset_path.clone(), load_dependencies, populate_hashes); - loader - .load(reader, meta, load_context) - .await - .map_err(|_| AssetLoadError::CannotLoadDependency { path: asset_path }) + loader.load(reader, meta, load_context).await.map_err(|e| { + AssetLoadError::AssetLoaderError { + path: asset_path.clone_owned(), + loader_name: loader.type_name(), + error: e, + } + }) } } @@ -742,17 +786,36 @@ pub fn handle_internal_asset_events(world: &mut World) { } let mut paths_to_reload = HashSet::new(); - for event in server.data.source_event_receiver.try_iter() { + let mut handle_event = |source: AssetSourceId<'static>, event: AssetSourceEvent| { match event { // TODO: if the asset was processed and the processed file was changed, the first modified event // should be skipped? AssetSourceEvent::ModifiedAsset(path) | AssetSourceEvent::ModifiedMeta(path) => { - let path = AssetPath::from_path(path); + let path = AssetPath::from(path).with_source(source); queue_ancestors(&path, &infos, &mut paths_to_reload); paths_to_reload.insert(path); } _ => {} } + }; + + for source in server.data.sources.iter() { + match server.data.mode { + AssetServerMode::Unprocessed { .. } => { + if let Some(receiver) = source.event_receiver() { + for event in receiver.try_iter() { + handle_event(source.id(), event); + } + } + } + AssetServerMode::Processed { .. } => { + if let Some(receiver) = source.processed_event_receiver() { + for event in receiver.try_iter() { + handle_event(source.id(), event); + } + } + } + } } for path in paths_to_reload { @@ -848,16 +911,27 @@ pub enum AssetLoadError { MissingAssetLoaderForTypeName(#[from] MissingAssetLoaderForTypeNameError), #[error(transparent)] AssetReaderError(#[from] AssetReaderError), + #[error(transparent)] + MissingAssetSourceError(#[from] MissingAssetSourceError), + #[error(transparent)] + MissingProcessedAssetReaderError(#[from] MissingProcessedAssetReaderError), #[error("Encountered an error while reading asset metadata bytes")] AssetMetaReadError, - #[error(transparent)] - DeserializeMeta(DeserializeMetaError), + #[error("Failed to deserialize meta for asset {path}: {error}")] + DeserializeMeta { + path: AssetPath<'static>, + error: Box, + }, #[error("Asset '{path}' is configured to be processed. It cannot be loaded directly.")] CannotLoadProcessedAsset { path: AssetPath<'static> }, #[error("Asset '{path}' is configured to be ignored. It cannot be loaded.")] CannotLoadIgnoredAsset { path: AssetPath<'static> }, - #[error("Asset '{path}' is a dependency. 
It cannot be loaded directly.")] - CannotLoadDependency { path: AssetPath<'static> }, + #[error("Failed to load asset '{path}' with asset loader '{loader_name}': {error}")] + AssetLoaderError { + path: AssetPath<'static>, + loader_name: &'static str, + error: Box, + }, } /// An error that occurs when an [`AssetLoader`] is not registered for a given extension. @@ -893,8 +967,3 @@ impl std::fmt::Debug for AssetServer { .finish() } } - -pub(crate) static CANNOT_WATCH_ERROR_MESSAGE: &str = - "Cannot watch for changes because the current `AssetReader` does not support it. If you are using \ - the FileAssetReader (the default on desktop platforms), enabling the filesystem_watcher feature will \ - add this functionality."; diff --git a/crates/bevy_gltf/src/loader.rs b/crates/bevy_gltf/src/loader.rs index 87fb8c6318b44..65ba1dbf9e5d1 100644 --- a/crates/bevy_gltf/src/loader.rs +++ b/crates/bevy_gltf/src/loader.rs @@ -1206,7 +1206,7 @@ async fn load_buffers( Err(()) => { // TODO: Remove this and add dep let buffer_path = load_context.path().parent().unwrap().join(uri); - load_context.read_asset_bytes(&buffer_path).await? + load_context.read_asset_bytes(buffer_path).await? } }; buffer_data.push(buffer_bytes); diff --git a/crates/bevy_internal/Cargo.toml b/crates/bevy_internal/Cargo.toml index c92a3618ebbdc..9644e87141acf 100644 --- a/crates/bevy_internal/Cargo.toml +++ b/crates/bevy_internal/Cargo.toml @@ -103,7 +103,10 @@ glam_assert = ["bevy_math/glam_assert"] default_font = ["bevy_text?/default_font"] # Enables watching the filesystem for Bevy Asset hot-reloading -filesystem_watcher = ["bevy_asset?/filesystem_watcher"] +file_watcher = ["bevy_asset?/file_watcher"] + +# Enables watching embedded files for Bevy Asset hot-reloading +embedded_watcher = ["bevy_asset?/embedded_watcher"] [dependencies] # bevy diff --git a/crates/bevy_tasks/src/single_threaded_task_pool.rs b/crates/bevy_tasks/src/single_threaded_task_pool.rs index 36e38df5a7970..9555a6a470f7c 100644 --- a/crates/bevy_tasks/src/single_threaded_task_pool.rs +++ b/crates/bevy_tasks/src/single_threaded_task_pool.rs @@ -12,7 +12,7 @@ pub struct TaskPoolBuilder {} /// This is a dummy struct for wasm support to provide the same api as with the multithreaded /// task pool. In the case of the multithreaded task pool this struct is used to spawn /// tasks on a specific thread. But the wasm task pool just calls -/// [`wasm_bindgen_futures::spawn_local`] for spawning which just runs tasks on the main thread +/// `wasm_bindgen_futures::spawn_local` for spawning which just runs tasks on the main thread /// and so the [`ThreadExecutor`] does nothing. #[derive(Default)] pub struct ThreadExecutor<'a>(PhantomData<&'a ()>); @@ -159,7 +159,7 @@ impl TaskPool { FakeTask } - /// Spawns a static future on the JS event loop. This is exactly the same as [`TaskSpool::spawn`]. + /// Spawns a static future on the JS event loop. This is exactly the same as [`TaskPool::spawn`]. pub fn spawn_local(&self, future: impl Future + 'static) -> FakeTask where T: 'static, diff --git a/crates/bevy_utils/src/cow_arc.rs b/crates/bevy_utils/src/cow_arc.rs index 31a204863d3d3..c78318323588b 100644 --- a/crates/bevy_utils/src/cow_arc.rs +++ b/crates/bevy_utils/src/cow_arc.rs @@ -42,7 +42,7 @@ where &'a T: Into>, { /// Converts this into an "owned" value. If internally a value is borrowed, it will be cloned into an "owned [`Arc`]". - /// If it is already an "owned [`Arc`]", it will remain unchanged. 
+ /// If it is already a [`CowArc::Owned`] or a [`CowArc::Static`], it will remain unchanged. #[inline] pub fn into_owned(self) -> CowArc<'static, T> { match self { @@ -51,6 +51,14 @@ where CowArc::Owned(value) => CowArc::Owned(value), } } + + /// Clones into an owned [`CowArc<'static>`]. If internally a value is borrowed, it will be cloned into an "owned [`Arc`]". + /// If it is already a [`CowArc::Owned`] or [`CowArc::Static`], the value will be cloned. + /// This is equivalent to `.clone().into_owned()`. + #[inline] + pub fn clone_owned(&self) -> CowArc<'static, T> { + self.clone().into_owned() + } } impl<'a, T: ?Sized> Clone for CowArc<'a, T> { diff --git a/docs/cargo_features.md b/docs/cargo_features.md index f3d618fb0da5e..f17962cc2dbf8 100644 --- a/docs/cargo_features.md +++ b/docs/cargo_features.md @@ -51,8 +51,9 @@ The default feature set enables most of the expected features of a game engine, |dds|DDS compressed texture support| |detailed_trace|Enable detailed trace event logging. These trace events are expensive even when off, thus they require compile time opt-in| |dynamic_linking|Force dynamic linking, which improves iterative compile times| +|embedded_watcher|Enables watching in memory asset providers for Bevy Asset hot-reloading| |exr|EXR image format support| -|filesystem_watcher|Enables watching the filesystem for Bevy Asset hot-reloading| +|file_watcher|Enables watching the filesystem for Bevy Asset hot-reloading| |flac|FLAC audio format support| |glam_assert|Enable assertions to check the validity of parameters passed to glam| |jpeg|JPEG image format support| diff --git a/examples/asset/custom_asset_reader.rs b/examples/asset/custom_asset_reader.rs index 99ad8fe07e973..3064cd12593c6 100644 --- a/examples/asset/custom_asset_reader.rs +++ b/examples/asset/custom_asset_reader.rs @@ -4,7 +4,7 @@ use bevy::{ asset::io::{ - file::FileAssetReader, AssetProvider, AssetProviders, AssetReader, AssetReaderError, + file::FileAssetReader, AssetReader, AssetReaderError, AssetSource, AssetSourceId, PathStream, Reader, }, prelude::*, @@ -43,13 +43,6 @@ impl AssetReader for CustomAssetReader { ) -> BoxedFuture<'a, Result> { self.0.is_directory(path) } - - fn watch_for_changes( - &self, - event_sender: crossbeam_channel::Sender, - ) -> Option> { - self.0.watch_for_changes(event_sender) - } } /// A plugins that registers our new asset reader @@ -57,24 +50,17 @@ struct CustomAssetReaderPlugin; impl Plugin for CustomAssetReaderPlugin { fn build(&self, app: &mut App) { - let mut asset_providers = app - .world - .get_resource_or_insert_with::(Default::default); - asset_providers.insert_reader("CustomAssetReader", || { - Box::new(CustomAssetReader(FileAssetReader::new("assets"))) - }); + app.register_asset_source( + AssetSourceId::Default, + AssetSource::build() + .with_reader(|| Box::new(CustomAssetReader(FileAssetReader::new("assets")))), + ); } } fn main() { App::new() - .add_plugins(( - CustomAssetReaderPlugin, - DefaultPlugins.set(AssetPlugin::Unprocessed { - source: AssetProvider::Custom("CustomAssetReader".to_string()), - watch_for_changes: false, - }), - )) + .add_plugins((CustomAssetReaderPlugin, DefaultPlugins)) .add_systems(Startup, setup) .run(); } diff --git a/examples/asset/hot_asset_reloading.rs b/examples/asset/hot_asset_reloading.rs index 2a97bc7093244..b764006ad71fb 100644 --- a/examples/asset/hot_asset_reloading.rs +++ b/examples/asset/hot_asset_reloading.rs @@ -1,12 +1,15 @@ //! Hot reloading allows you to modify assets files to be immediately reloaded while your game is //! 
running. This lets you immediately see the results of your changes without restarting the game. //! This example illustrates hot reloading mesh changes. +//! +//! Note that hot asset reloading requires the [`AssetWatcher`](bevy::asset::io::AssetWatcher) to be enabled +//! for your current platform. For desktop platforms, enable the `file_watcher` cargo feature. use bevy::prelude::*; fn main() { App::new() - .add_plugins(DefaultPlugins.set(AssetPlugin::default().watch_for_changes())) + .add_plugins(DefaultPlugins) .add_systems(Startup, setup) .run(); } diff --git a/examples/asset/processing/e.txt b/examples/asset/processing/e.txt new file mode 100644 index 0000000000000..9cbe6ea56f225 --- /dev/null +++ b/examples/asset/processing/e.txt @@ -0,0 +1 @@ +e \ No newline at end of file diff --git a/examples/asset/processing/processing.rs b/examples/asset/processing/processing.rs index b4deedd59036e..b0b3912b5a471 100644 --- a/examples/asset/processing/processing.rs +++ b/examples/asset/processing/processing.rs @@ -2,7 +2,8 @@ use bevy::{ asset::{ - io::{AssetProviders, Reader, Writer}, + embedded_asset, + io::{Reader, Writer}, processor::LoadAndSave, saver::{AssetSaver, SavedAsset}, AssetLoader, AsyncReadExt, AsyncWriteExt, LoadContext, @@ -16,15 +17,6 @@ use thiserror::Error; fn main() { App::new() - .insert_resource( - // This is just overriding the default paths to scope this to the correct example folder - // You can generally skip this in your own projects - AssetProviders::default() - .with_default_file_source("examples/asset/processing/assets".to_string()) - .with_default_file_destination( - "examples/asset/processing/imported_assets".to_string(), - ), - ) // Enabling `processed_dev` will configure the AssetPlugin to use asset processing. // This will run the AssetProcessor in the background, which will listen for changes to // the `assets` folder, run them through configured asset processors, and write the results @@ -32,9 +24,18 @@ fn main() { // // The AssetProcessor will create `.meta` files automatically for assets in the `assets` folder, // which can then be used to configure how the asset will be processed. 
- .add_plugins((DefaultPlugins.set(AssetPlugin::processed_dev()), TextPlugin)) - // This is what a deployed app should use - // .add_plugins((DefaultPlugins.set(AssetPlugin::processed()), TextPlugin)) + .add_plugins(( + DefaultPlugins.set(AssetPlugin { + // This is just overriding the default paths to scope this to the correct example folder + // You can generally skip this in your own projects + mode: AssetMode::ProcessedDev, + file_path: "examples/asset/processing/assets".to_string(), + processed_file_path: "examples/asset/processing/imported_assets/Default" + .to_string(), + ..default() + }), + TextPlugin, + )) .add_systems(Startup, setup) .add_systems(Update, print_text) .run(); @@ -51,6 +52,7 @@ pub struct TextPlugin; impl Plugin for TextPlugin { fn build(&self, app: &mut App) { + embedded_asset!(app, "examples/asset/processing/", "e.txt"); app.init_asset::() .init_asset::() .register_asset_loader(CoolTextLoader) @@ -199,6 +201,7 @@ struct TextAssets { b: Handle, c: Handle, d: Handle, + e: Handle, } fn setup(mut commands: Commands, assets: Res) { @@ -209,6 +212,7 @@ fn setup(mut commands: Commands, assets: Res) { b: assets.load("foo/b.cool.ron"), c: assets.load("foo/c.cool.ron"), d: assets.load("d.cool.ron"), + e: assets.load("embedded://asset_processing/e.txt"), }); } @@ -220,6 +224,7 @@ fn print_text(handles: Res, texts: Res>) { println!(" b: {:?}", texts.get(&handles.b)); println!(" c: {:?}", texts.get(&handles.c)); println!(" d: {:?}", texts.get(&handles.d)); + println!(" e: {:?}", texts.get(&handles.e)); println!("(You can modify source assets and their .meta files to hot-reload changes!)"); println!(); } diff --git a/examples/scene/scene.rs b/examples/scene/scene.rs index da09648c244e3..7f3af48996323 100644 --- a/examples/scene/scene.rs +++ b/examples/scene/scene.rs @@ -4,7 +4,7 @@ use std::{fs::File, io::Write}; fn main() { App::new() - .add_plugins(DefaultPlugins.set(AssetPlugin::default().watch_for_changes())) + .add_plugins(DefaultPlugins) .register_type::() .register_type::() .register_type::() @@ -75,7 +75,8 @@ fn load_scene_system(mut commands: Commands, asset_server: Res) { } // This system logs all ComponentA components in our world. Try making a change to a ComponentA in -// load_scene_example.scn. You should immediately see the changes appear in the console. +// load_scene_example.scn. If you enable the `file_watcher` cargo feature you should immediately see +// the changes appear in the console whenever you make a change. fn log_system( query: Query<(Entity, &ComponentA), Changed>, res: Option>, diff --git a/examples/shader/post_processing.rs b/examples/shader/post_processing.rs index d789eaa48b023..94d3096e6a7c4 100644 --- a/examples/shader/post_processing.rs +++ b/examples/shader/post_processing.rs @@ -36,10 +36,7 @@ use bevy::{ fn main() { App::new() - .add_plugins(( - DefaultPlugins.set(AssetPlugin::default().watch_for_changes()), - PostProcessPlugin, - )) + .add_plugins((DefaultPlugins, PostProcessPlugin)) .add_systems(Startup, setup) .add_systems(Update, (rotate, update_settings)) .run(); diff --git a/examples/tools/scene_viewer/main.rs b/examples/tools/scene_viewer/main.rs index 2dbfc97b1c66d..1f53b5f174fa7 100644 --- a/examples/tools/scene_viewer/main.rs +++ b/examples/tools/scene_viewer/main.rs @@ -4,9 +4,10 @@ //! replacing the path as appropriate. //! In case of multiple scenes, you can select which to display by adapting the file path: `/path/to/model.gltf#Scene1`. //! 
With no arguments it will load the `FlightHelmet` glTF model from the repository assets subdirectory. +//! +//! If you want to hot reload asset changes, enable the `file_watcher` cargo feature. use bevy::{ - asset::io::AssetProviders, math::Vec3A, prelude::*, render::primitives::{Aabb, Sphere}, @@ -29,9 +30,6 @@ fn main() { color: Color::WHITE, brightness: 1.0 / 5.0f32, }) - .insert_resource(AssetProviders::default().with_default_file_source( - std::env::var("CARGO_MANIFEST_DIR").unwrap_or_else(|_| ".".to_string()), - )) .add_plugins(( DefaultPlugins .set(WindowPlugin { @@ -41,7 +39,10 @@ fn main() { }), ..default() }) - .set(AssetPlugin::default().watch_for_changes()), + .set(AssetPlugin { + file_path: std::env::var("CARGO_MANIFEST_DIR").unwrap_or_else(|_| ".".to_string()), + ..default() + }), CameraControllerPlugin, SceneViewerPlugin, MorphViewerPlugin, From 4ae6a66481b1b1eab8039f8406bb73210ed4c689 Mon Sep 17 00:00:00 2001 From: Thomas Wilgenbus Date: Sat, 14 Oct 2023 18:07:49 +0200 Subject: [PATCH 03/63] Allow optional extraction of resources from the main world (#10109) # Objective From my understanding, although resources are not meant to be created and removed at every frame, they are still meant to be created dynamically during the lifetime of the App. But because the extract_resource API does not allow optional resources from the main world, it's impossible to use resources in the render phase that were not created before the render sub-app itself. ## Solution Because the ECS engine already allows for system parameters to be `Option`, it just had to be added. --- ## Changelog - Changed - `extract_resource` now takes an optional main world resource - Fixed - `ExtractResourcePlugin` doesn't cause panics anymore if the resource is not already inserted --- crates/bevy_render/src/extract_resource.rs | 30 ++++++++++++---------- 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/crates/bevy_render/src/extract_resource.rs b/crates/bevy_render/src/extract_resource.rs index 4233ef61b5d61..37a21f45bf84c 100644 --- a/crates/bevy_render/src/extract_resource.rs +++ b/crates/bevy_render/src/extract_resource.rs @@ -40,24 +40,26 @@ impl Plugin for ExtractResourcePlugin { /// This system extracts the resource of the corresponding [`Resource`] type pub fn extract_resource( mut commands: Commands, - main_resource: Extract>, + main_resource: Extract>>, target_resource: Option>, #[cfg(debug_assertions)] mut has_warned_on_remove: Local, ) { - if let Some(mut target_resource) = target_resource { - if main_resource.is_changed() { - *target_resource = R::extract_resource(&main_resource); - } - } else { - #[cfg(debug_assertions)] - if !main_resource.is_added() && !*has_warned_on_remove { - *has_warned_on_remove = true; - bevy_log::warn!( - "Removing resource {} from render world not expected, adding using `Commands`. + if let Some(main_resource) = main_resource.as_ref() { + if let Some(mut target_resource) = target_resource { + if main_resource.is_changed() { + *target_resource = R::extract_resource(main_resource); + } + } else { + #[cfg(debug_assertions)] + if !main_resource.is_added() && !*has_warned_on_remove { + *has_warned_on_remove = true; + bevy_log::warn!( + "Removing resource {} from render world not expected, adding using `Commands`. 
This may decrease performance", - std::any::type_name::() - ); + std::any::type_name::() + ); + } + commands.insert_resource(R::extract_resource(main_resource)); } - commands.insert_resource(R::extract_resource(&main_resource)); } } From d9a0761eb24b4b09d0695e0e940c4172ce28d267 Mon Sep 17 00:00:00 2001 From: robtfm <50659922+robtfm@users.noreply.github.com> Date: Sat, 14 Oct 2023 19:03:11 +0100 Subject: [PATCH 04/63] ssao use unlit_color instead of white (#10117) # Objective #10105 changed the ssao input color from the material base color to white. i can't actually see a difference in the example but there should be one in some cases. ## Solution change it back. --- crates/bevy_pbr/src/render/pbr.wgsl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/bevy_pbr/src/render/pbr.wgsl b/crates/bevy_pbr/src/render/pbr.wgsl index dc84ee30dfa5f..6b9113f11dd6f 100644 --- a/crates/bevy_pbr/src/render/pbr.wgsl +++ b/crates/bevy_pbr/src/render/pbr.wgsl @@ -124,7 +124,7 @@ fn fragment( #endif #ifdef SCREEN_SPACE_AMBIENT_OCCLUSION let ssao = textureLoad(screen_space_ambient_occlusion_texture, vec2(in.position.xy), 0i).r; - let ssao_multibounce = gtao_multibounce(ssao, pbr_input.material.base_color.rgb); + let ssao_multibounce = gtao_multibounce(ssao, unlit_color.rgb); occlusion = min(occlusion, ssao_multibounce); #endif pbr_input.occlusion = occlusion; From ca37b92540d657364011e79683c5e699748c19d1 Mon Sep 17 00:00:00 2001 From: Dworv <88643996+Dworv@users.noreply.github.com> Date: Sat, 14 Oct 2023 15:54:57 -0700 Subject: [PATCH 05/63] Make loading warning for no file ext more descriptive (#10119) # Objective Currently, the asset loader outputs ``` 2023-10-14T15:11:09.328850Z WARN bevy_asset::asset_server: no `AssetLoader` found ``` when user forgets to add an extension to a file. This is very confusing behaviour, it sounds like there aren't any asset loaders existing. ## Solution Add an extra message on the end when there are no file extensions. --- crates/bevy_asset/src/server/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/bevy_asset/src/server/mod.rs b/crates/bevy_asset/src/server/mod.rs index 16d087331b3c2..5b685e03678df 100644 --- a/crates/bevy_asset/src/server/mod.rs +++ b/crates/bevy_asset/src/server/mod.rs @@ -956,7 +956,7 @@ fn format_missing_asset_ext(exts: &[String]) -> String { exts.join(", ") ) } else { - String::new() + " for file with no extension".to_string() } } From b9ddb37d5ba311ab565ae89f6945c8b5327a2243 Mon Sep 17 00:00:00 2001 From: "Ame :]" <104745335+ameknite@users.noreply.github.com> Date: Sat, 14 Oct 2023 19:52:31 -0500 Subject: [PATCH 06/63] add and fix shields in Readmes (#9993) # Objective Fix shields ## Solution - Correct shield in the Bevy ECS Readme, where only the MIT license is displayed ![Screenshot 2023-10-01 at 18 28 27](https://github.com/bevyengine/bevy/assets/104745335/a736a65e-0d47-4d9e-b32d-0b843a00922c) - Add shields to other Readmes. - homogenize shields and titles. 
--- README.md | 7 ++++--- crates/bevy_app/README.md | 9 +++++++++ crates/bevy_ecs/README.md | 4 +++- crates/bevy_mikktspace/README.md | 8 +++++++- crates/bevy_ptr/README.md | 8 +++++++- crates/bevy_reflect/README.md | 6 ++++++ crates/bevy_tasks/README.md | 8 +++++++- crates/bevy_utils/Readme.md | 9 +++++++++ 8 files changed, 52 insertions(+), 7 deletions(-) create mode 100644 crates/bevy_app/README.md create mode 100644 crates/bevy_utils/Readme.md diff --git a/README.md b/README.md index 9820b70393c6e..7e527742ad855 100644 --- a/README.md +++ b/README.md @@ -1,9 +1,10 @@ # [![Bevy](assets/branding/bevy_logo_light_dark_and_dimmed.svg)](https://bevyengine.org) +[![License](https://img.shields.io/badge/license-MIT%2FApache-blue.svg)](https://github.com/bevyengine/bevy#license) [![Crates.io](https://img.shields.io/crates/v/bevy.svg)](https://crates.io/crates/bevy) -[![MIT/Apache 2.0](https://img.shields.io/badge/license-MIT%2FApache-blue.svg)](https://github.com/bevyengine/bevy#license) -[![Crates.io](https://img.shields.io/crates/d/bevy.svg)](https://crates.io/crates/bevy) -[![Rust](https://github.com/bevyengine/bevy/workflows/CI/badge.svg)](https://github.com/bevyengine/bevy/actions) +[![Downloads](https://img.shields.io/crates/d/bevy.svg)](https://crates.io/crates/bevy) +[![Docs](https://docs.rs/bevy/badge.svg)](https://docs.rs/bevy/latest/bevy/) +[![CI](https://github.com/bevyengine/bevy/workflows/CI/badge.svg)](https://github.com/bevyengine/bevy/actions) [![Discord](https://img.shields.io/discord/691052431525675048.svg?label=&logo=discord&logoColor=ffffff&color=7389D8&labelColor=6A7EC2)](https://discord.gg/bevy) ## What is Bevy? diff --git a/crates/bevy_app/README.md b/crates/bevy_app/README.md new file mode 100644 index 0000000000000..e38f98875029f --- /dev/null +++ b/crates/bevy_app/README.md @@ -0,0 +1,9 @@ +# Bevy App + +[![License](https://img.shields.io/badge/license-MIT%2FApache-blue.svg)](https://github.com/bevyengine/bevy#license) +[![Crates.io](https://img.shields.io/crates/v/bevy.svg)](https://crates.io/crates/bevy_app) +[![Downloads](https://img.shields.io/crates/d/bevy_app.svg)](https://crates.io/crates/bevy_app) +[![Docs](https://docs.rs/bevy_app/badge.svg)](https://docs.rs/bevy_app/latest/bevy_app/) +[![Discord](https://img.shields.io/discord/691052431525675048.svg?label=&logo=discord&logoColor=ffffff&color=7389D8&labelColor=6A7EC2)](https://discord.gg/bevy) + +This crate is about everything concerning the highest-level, application layer of a [Bevy](https://crates.io/crates/bevy) app. diff --git a/crates/bevy_ecs/README.md b/crates/bevy_ecs/README.md index 6bdee54c71b5d..2763970807961 100644 --- a/crates/bevy_ecs/README.md +++ b/crates/bevy_ecs/README.md @@ -1,7 +1,9 @@ # Bevy ECS +[![License](https://img.shields.io/badge/license-MIT%2FApache-blue.svg)](https://github.com/bevyengine/bevy#license) [![Crates.io](https://img.shields.io/crates/v/bevy_ecs.svg)](https://crates.io/crates/bevy_ecs) -[![license](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/bevyengine/bevy/blob/HEAD/LICENSE) +[![Downloads](https://img.shields.io/crates/d/bevy_ecs.svg)](https://crates.io/crates/bevy_ecs) +[![Docs](https://docs.rs/bevy_ecs/badge.svg)](https://docs.rs/bevy_ecs/latest/bevy_ecs/) [![Discord](https://img.shields.io/discord/691052431525675048.svg?label=&logo=discord&logoColor=ffffff&color=7389D8&labelColor=6A7EC2)](https://discord.gg/bevy) ## What is Bevy ECS? 
diff --git a/crates/bevy_mikktspace/README.md b/crates/bevy_mikktspace/README.md index b5f886ce7abf5..7ac119d674ce2 100644 --- a/crates/bevy_mikktspace/README.md +++ b/crates/bevy_mikktspace/README.md @@ -1,4 +1,10 @@ -# bevy_mikktspace +# Bevy Mikktspace + +[![License](https://img.shields.io/badge/license-MIT%2FApache%2FZlib-blue.svg)](https://github.com/bevyengine/bevy#license) +[![Crates.io](https://img.shields.io/crates/v/bevy.svg)](https://crates.io/crates/bevy_mikktspace) +[![Downloads](https://img.shields.io/crates/d/bevy_mikktspace.svg)](https://crates.io/crates/bevy_mikktspace) +[![Docs](https://docs.rs/bevy_mikktspace/badge.svg)](https://docs.rs/bevy_mikktspace/latest/bevy_mikktspace/) +[![Discord](https://img.shields.io/discord/691052431525675048.svg?label=&logo=discord&logoColor=ffffff&color=7389D8&labelColor=6A7EC2)](https://discord.gg/bevy) This is a fork of [https://github.com/gltf-rs/mikktspace](https://github.com/gltf-rs/mikktspace), which in turn is a port of the Mikkelsen Tangent Space Algorithm reference implementation to Rust. It has been forked for use in the bevy game engine to be able to update maths crate dependencies in lock-step with bevy releases. It is vendored in the bevy repository itself as [crates/bevy_mikktspace](https://github.com/bevyengine/bevy/tree/main/crates/bevy_mikktspace). diff --git a/crates/bevy_ptr/README.md b/crates/bevy_ptr/README.md index a08c3043ec391..c78a6ac544635 100644 --- a/crates/bevy_ptr/README.md +++ b/crates/bevy_ptr/README.md @@ -1,4 +1,10 @@ -# `bevy_ptr` +# Bevy Ptr + +[![License](https://img.shields.io/badge/license-MIT%2FApache-blue.svg)](https://github.com/bevyengine/bevy#license) +[![Crates.io](https://img.shields.io/crates/v/bevy_ptr.svg)](https://crates.io/crates/bevy_ptr) +[![Downloads](https://img.shields.io/crates/d/bevy_ptr.svg)](https://crates.io/crates/bevy_ptr) +[![Docs](https://docs.rs/bevy_ptr/badge.svg)](https://docs.rs/bevy_ptr/latest/bevy_ptr/) +[![Discord](https://img.shields.io/discord/691052431525675048.svg?label=&logo=discord&logoColor=ffffff&color=7389D8&labelColor=6A7EC2)](https://discord.gg/bevy) The `bevy_ptr` crate provides low-level abstractions for working with pointers in a more safe way than using rust's raw pointers. 
diff --git a/crates/bevy_reflect/README.md b/crates/bevy_reflect/README.md index dba4b0f2430f8..4289f869fff91 100644 --- a/crates/bevy_reflect/README.md +++ b/crates/bevy_reflect/README.md @@ -1,5 +1,11 @@ # Bevy Reflect +[![License](https://img.shields.io/badge/license-MIT%2FApache-blue.svg)](https://github.com/bevyengine/bevy#license) +[![Crates.io](https://img.shields.io/crates/v/bevy.svg)](https://crates.io/crates/bevy_reflect) +[![Downloads](https://img.shields.io/crates/d/bevy_reflect.svg)](https://crates.io/crates/bevy_reflect) +[![Docs](https://docs.rs/bevy_reflect/badge.svg)](https://docs.rs/bevy_reflect/latest/bevy_reflect/) +[![Discord](https://img.shields.io/discord/691052431525675048.svg?label=&logo=discord&logoColor=ffffff&color=7389D8&labelColor=6A7EC2)](https://discord.gg/bevy) + This crate enables you to dynamically interact with Rust types: * Derive the Reflect traits diff --git a/crates/bevy_tasks/README.md b/crates/bevy_tasks/README.md index 233d6794c7d32..1d1a7fb90465b 100644 --- a/crates/bevy_tasks/README.md +++ b/crates/bevy_tasks/README.md @@ -1,4 +1,10 @@ -# bevy_tasks +# Bevy Tasks + +[![License](https://img.shields.io/badge/license-MIT%2FApache-blue.svg)](https://github.com/bevyengine/bevy#license) +[![Crates.io](https://img.shields.io/crates/v/bevy.svg)](https://crates.io/crates/bevy_tasks) +[![Downloads](https://img.shields.io/crates/d/bevy_tasks.svg)](https://crates.io/crates/bevy_tasks) +[![Docs](https://docs.rs/bevy_tasks/badge.svg)](https://docs.rs/bevy_tasks/latest/bevy_tasks/) +[![Discord](https://img.shields.io/discord/691052431525675048.svg?label=&logo=discord&logoColor=ffffff&color=7389D8&labelColor=6A7EC2)](https://discord.gg/bevy) A refreshingly simple task executor for bevy. :) diff --git a/crates/bevy_utils/Readme.md b/crates/bevy_utils/Readme.md new file mode 100644 index 0000000000000..a1eb221eb6a4e --- /dev/null +++ b/crates/bevy_utils/Readme.md @@ -0,0 +1,9 @@ +# Bevy Utils + +[![License](https://img.shields.io/badge/license-MIT%2FApache-blue.svg)](https://github.com/bevyengine/bevy#license) +[![Crates.io](https://img.shields.io/crates/v/bevy.svg)](https://crates.io/crates/bevy_utils) +[![Downloads](https://img.shields.io/crates/d/bevy_utils.svg)](https://crates.io/crates/bevy_utils) +[![Docs](https://docs.rs/bevy_utils/badge.svg)](https://docs.rs/bevy_utils/latest/bevy_utils/) +[![Discord](https://img.shields.io/discord/691052431525675048.svg?label=&logo=discord&logoColor=ffffff&color=7389D8&labelColor=6A7EC2)](https://discord.gg/bevy) + +A Collection of Utilities for the [Bevy Engine](https://bevyengine.org/). From 56eb362327158a7c131a6e9b32f253fa9e6ef437 Mon Sep 17 00:00:00 2001 From: Dimitri Belopopsky Date: Sun, 15 Oct 2023 16:37:53 +0200 Subject: [PATCH 07/63] Fix missing explicit lifetime name for copy_deferred_lighting_id name (#10128) # Objective On nightly there is a warning on a missing lifetime: ```bash warning: `&` without an explicit lifetime name cannot be used here ``` The details are in https://github.com/rust-lang/rust/issues/115010, but the bottom line is that in associated constants elided lifetimes are no longer allowed to be implicitly defined. This fixes the only place where it is missing. 
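As a minimal, self-contained sketch of the pattern (mirroring the one-line diff below), the elided form triggers the nightly warning while the explicit `'static` form compiles cleanly:

```rust
struct CopyDeferredLightingIdNode;

impl CopyDeferredLightingIdNode {
    // Elided lifetime in an associated constant: warns on nightly
    // ("`&` without an explicit lifetime name cannot be used here").
    // pub const NAME: &str = "copy_deferred_lighting_id";

    // Explicit `'static` lifetime: accepted on both stable and nightly.
    pub const NAME: &'static str = "copy_deferred_lighting_id";
}

fn main() {
    println!("{}", CopyDeferredLightingIdNode::NAME);
}
```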
## Solution - Add explicit `'static` lifetime --- crates/bevy_core_pipeline/src/deferred/copy_lighting_id.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/bevy_core_pipeline/src/deferred/copy_lighting_id.rs b/crates/bevy_core_pipeline/src/deferred/copy_lighting_id.rs index cb68e472035cb..5609896045ec8 100644 --- a/crates/bevy_core_pipeline/src/deferred/copy_lighting_id.rs +++ b/crates/bevy_core_pipeline/src/deferred/copy_lighting_id.rs @@ -60,7 +60,7 @@ impl Plugin for CopyDeferredLightingIdPlugin { #[derive(Default)] pub struct CopyDeferredLightingIdNode; impl CopyDeferredLightingIdNode { - pub const NAME: &str = "copy_deferred_lighting_id"; + pub const NAME: &'static str = "copy_deferred_lighting_id"; } impl ViewNode for CopyDeferredLightingIdNode { From 3866b1cc19399f54d00c44f275d40b0ec697e297 Mon Sep 17 00:00:00 2001 From: Carter Anderson Date: Sun, 15 Oct 2023 09:57:46 -0700 Subject: [PATCH 08/63] Fix load_folder for non-default Asset Sources (#10121) # Objective Fixes #10120 ## Solution Assign the folder path source to loaded descendant asset paths in `load_folder` --- crates/bevy_asset/src/server/mod.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/crates/bevy_asset/src/server/mod.rs b/crates/bevy_asset/src/server/mod.rs index 5b685e03678df..257813acd61a2 100644 --- a/crates/bevy_asset/src/server/mod.rs +++ b/crates/bevy_asset/src/server/mod.rs @@ -463,6 +463,7 @@ impl AssetServer { let path = path.into().into_owned(); fn load_folder<'a>( + source: AssetSourceId<'static>, path: &'a Path, reader: &'a dyn AssetReader, server: &'a AssetServer, @@ -474,10 +475,12 @@ impl AssetServer { let mut path_stream = reader.read_directory(path.as_ref()).await?; while let Some(child_path) = path_stream.next().await { if reader.is_directory(&child_path).await? 
{ - load_folder(&child_path, reader, server, handles).await?; + load_folder(source.clone(), &child_path, reader, server, handles) + .await?; } else { let path = child_path.to_str().expect("Path should be a valid string."); - match server.load_untyped_async(AssetPath::parse(path)).await { + let asset_path = AssetPath::parse(path).with_source(source.clone()); + match server.load_untyped_async(asset_path).await { Ok(handle) => handles.push(handle), // skip assets that cannot be loaded Err( @@ -519,7 +522,7 @@ impl AssetServer { }; let mut handles = Vec::new(); - match load_folder(path.path(), asset_reader, &server, &mut handles).await { + match load_folder(source.id(), path.path(), asset_reader, &server, &mut handles).await { Ok(_) => server.send_asset_event(InternalAssetEvent::Loaded { id, loaded_asset: LoadedAsset::new_with_dependencies( From 5781806e72548e1a37d8492c47ad420e7ad86dd6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois?= Date: Sun, 15 Oct 2023 20:20:29 +0200 Subject: [PATCH 09/63] only set up processed source if asset plugin is not unprocessed (#10123) # Objective - Since #9885, running on an iOS device crashes trying to create the processed folder - This only happens on real device, not on the simulator ## Solution - Setup processed assets only if needed --- crates/bevy_asset/src/io/source.rs | 25 +++++++++++++++---------- crates/bevy_asset/src/lib.rs | 6 +++++- 2 files changed, 20 insertions(+), 11 deletions(-) diff --git a/crates/bevy_asset/src/io/source.rs b/crates/bevy_asset/src/io/source.rs index ea07f8d39a4f3..8ee192462a31c 100644 --- a/crates/bevy_asset/src/io/source.rs +++ b/crates/bevy_asset/src/io/source.rs @@ -241,20 +241,25 @@ impl AssetSourceBuilder { /// Returns a builder containing the "platform default source" for the given `path` and `processed_path`. /// For most platforms, this will use [`FileAssetReader`](crate::io::file::FileAssetReader) / [`FileAssetWriter`](crate::io::file::FileAssetWriter), /// but some platforms (such as Android) have their own default readers / writers / watchers. - pub fn platform_default(path: &str, processed_path: &str) -> Self { - Self::default() + pub fn platform_default(path: &str, processed_path: Option<&str>) -> Self { + let default = Self::default() .with_reader(AssetSource::get_default_reader(path.to_string())) .with_writer(AssetSource::get_default_writer(path.to_string())) .with_watcher(AssetSource::get_default_watcher( path.to_string(), Duration::from_millis(300), - )) - .with_processed_reader(AssetSource::get_default_reader(processed_path.to_string())) - .with_processed_writer(AssetSource::get_default_writer(processed_path.to_string())) - .with_processed_watcher(AssetSource::get_default_watcher( - processed_path.to_string(), - Duration::from_millis(300), - )) + )); + if let Some(processed_path) = processed_path { + default + .with_processed_reader(AssetSource::get_default_reader(processed_path.to_string())) + .with_processed_writer(AssetSource::get_default_writer(processed_path.to_string())) + .with_processed_watcher(AssetSource::get_default_watcher( + processed_path.to_string(), + Duration::from_millis(300), + )) + } else { + default + } } } @@ -315,7 +320,7 @@ impl AssetSourceBuilders { } /// Initializes the default [`AssetSourceBuilder`] if it has not already been set. 
- pub fn init_default_source(&mut self, path: &str, processed_path: &str) { + pub fn init_default_source(&mut self, path: &str, processed_path: Option<&str>) { self.default .get_or_insert_with(|| AssetSourceBuilder::platform_default(path, processed_path)); } diff --git a/crates/bevy_asset/src/lib.rs b/crates/bevy_asset/src/lib.rs index 148dc40e56d01..66af7f0216b48 100644 --- a/crates/bevy_asset/src/lib.rs +++ b/crates/bevy_asset/src/lib.rs @@ -122,7 +122,11 @@ impl Plugin for AssetPlugin { let mut sources = app .world .get_resource_or_insert_with::(Default::default); - sources.init_default_source(&self.file_path, &self.processed_file_path); + sources.init_default_source( + &self.file_path, + (!matches!(self.mode, AssetMode::Unprocessed)) + .then_some(self.processed_file_path.as_str()), + ); embedded.register_source(&mut sources); } { From 49d5c6b8a361a209da07dc774e33fe44b65e9dfb Mon Sep 17 00:00:00 2001 From: Carter Anderson Date: Sun, 15 Oct 2023 11:21:49 -0700 Subject: [PATCH 10/63] Hot reload labeled assets whose source asset is not loaded (#9736) # Objective As called out in #9714, Bevy Asset V2 fails to hot-reload labeled assets whose source asset has changed (in cases where the root asset is not alive). ## Solution Track alive labeled assets for a given source asset and allow hot reloads in cases where a labeled asset is still alive. --- crates/bevy_asset/src/server/info.rs | 53 +++++++++++++++++++++++++++- crates/bevy_asset/src/server/mod.rs | 2 +- 2 files changed, 53 insertions(+), 2 deletions(-) diff --git a/crates/bevy_asset/src/server/info.rs b/crates/bevy_asset/src/server/info.rs index baa4007001e95..f41057f6221e9 100644 --- a/crates/bevy_asset/src/server/info.rs +++ b/crates/bevy_asset/src/server/info.rs @@ -69,6 +69,9 @@ pub(crate) struct AssetInfos { /// Tracks assets that depend on the "key" asset path inside their asset loaders ("loader dependencies") /// This should only be set when watching for changes to avoid unnecessary work. pub(crate) loader_dependants: HashMap, HashSet>>, + /// Tracks living labeled assets for a given source asset. + /// This should only be set when watching for changes to avoid unnecessary work. 
+ pub(crate) living_labeled_assets: HashMap, HashSet>, pub(crate) handle_providers: HashMap, pub(crate) dependency_loaded_event_sender: HashMap, } @@ -88,6 +91,8 @@ impl AssetInfos { Self::create_handle_internal( &mut self.infos, &self.handle_providers, + &mut self.living_labeled_assets, + self.watching_for_changes, TypeId::of::(), None, None, @@ -107,6 +112,8 @@ impl AssetInfos { Self::create_handle_internal( &mut self.infos, &self.handle_providers, + &mut self.living_labeled_assets, + self.watching_for_changes, type_id, None, None, @@ -116,9 +123,12 @@ impl AssetInfos { ) } + #[allow(clippy::too_many_arguments)] fn create_handle_internal( infos: &mut HashMap, handle_providers: &HashMap, + living_labeled_assets: &mut HashMap, HashSet>, + watching_for_changes: bool, type_id: TypeId, path: Option>, meta_transform: Option, @@ -128,6 +138,16 @@ impl AssetInfos { .get(&type_id) .ok_or(MissingHandleProviderError(type_id))?; + if watching_for_changes { + if let Some(path) = &path { + let mut without_label = path.to_owned(); + if let Some(label) = without_label.take_label() { + let labels = living_labeled_assets.entry(without_label).or_default(); + labels.insert(label.to_string()); + } + } + } + let handle = provider.reserve_handle_internal(true, path.clone(), meta_transform); let mut info = AssetInfo::new(Arc::downgrade(&handle), path); if loading { @@ -136,6 +156,7 @@ impl AssetInfos { info.rec_dep_load_state = RecursiveDependencyLoadState::Loading; } infos.insert(handle.id, info); + Ok(UntypedHandle::Strong(handle)) } @@ -226,6 +247,8 @@ impl AssetInfos { let handle = Self::create_handle_internal( &mut self.infos, &self.handle_providers, + &mut self.living_labeled_assets, + self.watching_for_changes, type_id, Some(path), meta_transform, @@ -256,7 +279,7 @@ impl AssetInfos { Some(UntypedHandle::Strong(strong_handle)) } - /// Returns `true` if this path has + /// Returns `true` if the asset this path points to is still alive pub(crate) fn is_path_alive<'a>(&self, path: impl Into>) -> bool { let path = path.into(); if let Some(id) = self.path_to_id.get(&path) { @@ -267,12 +290,26 @@ impl AssetInfos { false } + /// Returns `true` if the asset at this path should be reloaded + pub(crate) fn should_reload(&self, path: &AssetPath) -> bool { + if self.is_path_alive(path) { + return true; + } + + if let Some(living) = self.living_labeled_assets.get(path) { + !living.is_empty() + } else { + false + } + } + // Returns `true` if the asset should be removed from the collection pub(crate) fn process_handle_drop(&mut self, id: UntypedAssetId) -> bool { Self::process_handle_drop_internal( &mut self.infos, &mut self.path_to_id, &mut self.loader_dependants, + &mut self.living_labeled_assets, self.watching_for_changes, id, ) @@ -521,6 +558,7 @@ impl AssetInfos { infos: &mut HashMap, path_to_id: &mut HashMap, UntypedAssetId>, loader_dependants: &mut HashMap, HashSet>>, + living_labeled_assets: &mut HashMap, HashSet>, watching_for_changes: bool, id: UntypedAssetId, ) -> bool { @@ -540,6 +578,18 @@ impl AssetInfos { dependants.remove(&path); } } + if let Some(label) = path.label() { + let mut without_label = path.to_owned(); + without_label.remove_label(); + if let Entry::Occupied(mut entry) = + living_labeled_assets.entry(without_label) + { + entry.get_mut().remove(label); + if entry.get().is_empty() { + entry.remove(); + } + }; + } } path_to_id.remove(&path); } @@ -566,6 +616,7 @@ impl AssetInfos { &mut self.infos, &mut self.path_to_id, &mut self.loader_dependants, + &mut self.living_labeled_assets, 
self.watching_for_changes, id.untyped(provider.type_id), ); diff --git a/crates/bevy_asset/src/server/mod.rs b/crates/bevy_asset/src/server/mod.rs index 257813acd61a2..03cf286349214 100644 --- a/crates/bevy_asset/src/server/mod.rs +++ b/crates/bevy_asset/src/server/mod.rs @@ -395,7 +395,7 @@ impl AssetServer { let path = path.into().into_owned(); IoTaskPool::get() .spawn(async move { - if server.data.infos.read().is_path_alive(&path) { + if server.data.infos.read().should_reload(&path) { info!("Reloading {path} because it has changed"); if let Err(err) = server.load_internal(None, path, true, None).await { error!("{}", err); From 02943737b25add58262f95346969ea068d491601 Mon Sep 17 00:00:00 2001 From: Carter Anderson Date: Sun, 15 Oct 2023 11:36:51 -0700 Subject: [PATCH 11/63] Return an error when loading non-existent labels (#9751) # Objective Calling `asset_server.load("scene.gltf#SomeLabel")` will silently fail if `SomeLabel` does not exist. Referenced in #9714 ## Solution We now detect this case and return an error. I also slightly refactored `load_internal` to make the logic / dataflow much clearer. --------- Co-authored-by: Pascal Hertleif --- crates/bevy_asset/src/path.rs | 6 +++ crates/bevy_asset/src/server/mod.rs | 72 +++++++++++++++++------------ 2 files changed, 48 insertions(+), 30 deletions(-) diff --git a/crates/bevy_asset/src/path.rs b/crates/bevy_asset/src/path.rs index 11168ca245b50..43b7d2cab5c77 100644 --- a/crates/bevy_asset/src/path.rs +++ b/crates/bevy_asset/src/path.rs @@ -203,6 +203,12 @@ impl<'a> AssetPath<'a> { self.label.as_deref() } + /// Gets the "sub-asset label". + #[inline] + pub fn label_cow(&self) -> Option> { + self.label.clone() + } + /// Gets the path to the asset in the "virtual filesystem". #[inline] pub fn path(&self) -> &Path { diff --git a/crates/bevy_asset/src/server/mod.rs b/crates/bevy_asset/src/server/mod.rs index 03cf286349214..160a3e4d3228c 100644 --- a/crates/bevy_asset/src/server/mod.rs +++ b/crates/bevy_asset/src/server/mod.rs @@ -257,7 +257,7 @@ impl AssetServer { path: impl Into>, meta_transform: Option, ) -> Handle { - let mut path = path.into().into_owned(); + let path = path.into().into_owned(); let (handle, should_load) = self.data.infos.write().get_or_create_path_handle::( path.clone(), HandleLoadingMode::Request, @@ -265,13 +265,10 @@ impl AssetServer { ); if should_load { - let mut owned_handle = Some(handle.clone().untyped()); + let owned_handle = Some(handle.clone().untyped()); let server = self.clone(); IoTaskPool::get() .spawn(async move { - if path.take_label().is_some() { - owned_handle = None; - } if let Err(err) = server.load_internal(owned_handle, path, false, None).await { error!("{}", err); } @@ -291,6 +288,10 @@ impl AssetServer { self.load_internal(None, path, false, None).await } + /// Performs an async asset load. + /// + /// `input_handle` must only be [`Some`] if `should_load` was true when retrieving `input_handle`. This is an optimization to + /// avoid looking up `should_load` twice, but it means you _must_ be sure a load is necessary when calling this function with [`Some`]. 
async fn load_internal<'a>( &self, input_handle: Option, @@ -298,7 +299,7 @@ impl AssetServer { force: bool, meta_transform: Option, ) -> Result { - let mut path = path.into_owned(); + let path = path.into_owned(); let path_clone = path.clone(); let (mut meta, loader, mut reader) = self .get_meta_loader_and_reader(&path_clone) @@ -312,18 +313,8 @@ impl AssetServer { e })?; - let has_label = path.label().is_some(); - let (handle, should_load) = match input_handle { Some(handle) => { - if !has_label && handle.type_id() != loader.asset_type_id() { - return Err(AssetLoadError::RequestedHandleTypeMismatch { - path: path.into_owned(), - requested: handle.type_id(), - actual_asset_name: loader.asset_type_name(), - loader_name: loader.type_name(), - }); - } // if a handle was passed in, the "should load" check was already done (handle, true) } @@ -339,37 +330,51 @@ impl AssetServer { } }; + if path.label().is_none() && handle.type_id() != loader.asset_type_id() { + return Err(AssetLoadError::RequestedHandleTypeMismatch { + path: path.into_owned(), + requested: handle.type_id(), + actual_asset_name: loader.asset_type_name(), + loader_name: loader.type_name(), + }); + } + if !should_load && !force { return Ok(handle); } - let base_asset_id = if has_label { - path.remove_label(); - // If the path has a label, the current id does not match the asset root type. - // We need to get the actual asset id + + let (base_handle, base_path) = if path.label().is_some() { let mut infos = self.data.infos.write(); - let (actual_handle, _) = infos.get_or_create_path_handle_untyped( - path.clone(), + let base_path = path.without_label().into_owned(); + let (base_handle, _) = infos.get_or_create_path_handle_untyped( + base_path.clone(), loader.asset_type_id(), loader.asset_type_name(), - // ignore current load state ... we kicked off this sub asset load because it needed to be loaded but - // does not currently exist HandleLoadingMode::Force, None, ); - actual_handle.id() + (base_handle, base_path) } else { - handle.id() + (handle.clone(), path.clone()) }; - if let Some(meta_transform) = handle.meta_transform() { + if let Some(meta_transform) = base_handle.meta_transform() { (*meta_transform)(&mut *meta); } match self - .load_with_meta_loader_and_reader(&path, meta, &*loader, &mut *reader, true, false) + .load_with_meta_loader_and_reader(&base_path, meta, &*loader, &mut *reader, true, false) .await { Ok(mut loaded_asset) => { + if let Some(label) = path.label_cow() { + if !loaded_asset.labeled_assets.contains_key(&label) { + return Err(AssetLoadError::MissingLabel { + base_path, + label: label.to_string(), + }); + } + } for (_, labeled_asset) in loaded_asset.labeled_assets.drain() { self.send_asset_event(InternalAssetEvent::Loaded { id: labeled_asset.handle.id(), @@ -377,13 +382,15 @@ impl AssetServer { }); } self.send_asset_event(InternalAssetEvent::Loaded { - id: base_asset_id, + id: base_handle.id(), loaded_asset, }); Ok(handle) } Err(err) => { - self.send_asset_event(InternalAssetEvent::Failed { id: base_asset_id }); + self.send_asset_event(InternalAssetEvent::Failed { + id: base_handle.id(), + }); Err(err) } } @@ -935,6 +942,11 @@ pub enum AssetLoadError { loader_name: &'static str, error: Box, }, + #[error("The file at '{base_path}' does not contain the labeled asset '{label}'.")] + MissingLabel { + base_path: AssetPath<'static>, + label: String, + }, } /// An error that occurs when an [`AssetLoader`] is not registered for a given extension. 
From 3d79dc4cdc3d4e56668bc314c9ee0bacb4bf0452 Mon Sep 17 00:00:00 2001 From: Nuutti Kotivuori Date: Mon, 16 Oct 2023 04:57:55 +0300 Subject: [PATCH 12/63] Unify `FixedTime` and `Time` while fixing several problems (#8964) # Objective Current `FixedTime` and `Time` have several problems. This pull aims to fix many of them at once. - If there is a longer pause between app updates, time will jump forward a lot at once and fixed time will iterate on `FixedUpdate` for a large number of steps. If the pause is merely seconds, then this will just mean jerkiness and possible unexpected behaviour in gameplay. If the pause is hours/days as with OS suspend, the game will appear to freeze until it has caught up with real time. - If calculating a fixed step takes longer than specified fixed step period, the game will enter a death spiral where rendering each frame takes longer and longer due to more and more fixed step updates being run per frame and the game appears to freeze. - There is no way to see current fixed step elapsed time inside fixed steps. In order to track this, the game designer needs to add a custom system inside `FixedUpdate` that calculates elapsed or step count in a resource. - Access to delta time inside fixed step is `FixedStep::period` rather than `Time::delta`. This, coupled with the issue that `Time::elapsed` isn't available at all for fixed steps, makes it that time requiring systems are either implemented to be run in `FixedUpdate` or `Update`, but rarely work in both. - Fixes #8800 - Fixes #8543 - Fixes #7439 - Fixes #5692 ## Solution - Create a generic `Time` clock that has no processing logic but which can be instantiated for multiple usages. This is also exposed for users to add custom clocks. - Create three standard clocks, `Time`, `Time` and `Time`, all of which contain their individual logic. - Create one "default" clock, which is just `Time` (or `Time<()>`), which will be overwritten from `Time` on each update, and `Time` inside `FixedUpdate` schedule. This way systems that do not care specifically which time they track can work both in `Update` and `FixedUpdate` without changes and the behaviour is intuitive. - Add `max_delta` to virtual time update, which limits how much can be added to virtual time by a single update. This fixes both the behaviour after a long freeze, and also the death spiral by limiting how many fixed timestep iterations there can be per update. Possible future work could be adding `max_accumulator` to add a sort of "leaky bucket" time processing to possibly smooth out jumps in time while keeping frame rate stable. - Many minor tweaks and clarifications to the time functions and their documentation. ## Changelog - `Time::raw_delta()`, `Time::raw_elapsed()` and related methods are moved to `Time::delta()` and `Time::elapsed()` and now match `Time` API - `FixedTime` is now `Time` and matches `Time` API. - `Time` default timestep is now 64 Hz, or 15625 microseconds. - `Time` inside `FixedUpdate` now reflects fixed timestep time, making systems portable between `Update ` and `FixedUpdate`. - `Time::pause()`, `Time::set_relative_speed()` and related methods must now be called as `Time::pause()` etc. - There is a new `max_delta` setting in `Time` that limits how much the clock can jump by a single update. The default value is 0.25 seconds. - Removed `on_fixed_timer()` condition as `on_timer()` does the right thing inside `FixedUpdate` now. ## Migration Guide - Change all `Res
for SetMeshViewBindGroup { type Param = (); diff --git a/crates/bevy_pbr/src/render/mesh_view_bindings.rs b/crates/bevy_pbr/src/render/mesh_view_bindings.rs new file mode 100644 index 0000000000000..8ed474769525f --- /dev/null +++ b/crates/bevy_pbr/src/render/mesh_view_bindings.rs @@ -0,0 +1,503 @@ +use std::array; + +use bevy_core_pipeline::{ + prepass::ViewPrepassTextures, + tonemapping::{ + get_lut_bind_group_layout_entries, get_lut_bindings, Tonemapping, TonemappingLuts, + }, +}; +use bevy_ecs::{ + component::Component, + entity::Entity, + system::{Commands, Query, Res}, +}; +use bevy_render::{ + globals::{GlobalsBuffer, GlobalsUniform}, + render_asset::RenderAssets, + render_resource::{ + BindGroup, BindGroupDescriptor, BindGroupEntry, BindGroupLayout, BindGroupLayoutDescriptor, + BindGroupLayoutEntry, BindingResource, BindingType, BufferBindingType, SamplerBindingType, + ShaderStages, ShaderType, TextureFormat, TextureSampleType, TextureViewDimension, + }, + renderer::RenderDevice, + texture::{BevyDefault, FallbackImageCubemap, FallbackImageMsaa, Image}, + view::{Msaa, ViewUniform, ViewUniforms}, +}; + +use crate::{ + environment_map, prepass, EnvironmentMapLight, FogMeta, GlobalLightMeta, GpuFog, GpuLights, + GpuPointLights, LightMeta, MeshPipeline, MeshPipelineKey, ScreenSpaceAmbientOcclusionTextures, + ShadowSamplers, ViewClusterBindings, ViewShadowBindings, +}; + +#[derive(Clone)] +pub struct MeshPipelineViewLayout { + pub bind_group_layout: BindGroupLayout, + + #[cfg(debug_assertions)] + pub texture_count: usize, +} + +bitflags::bitflags! { + /// A key that uniquely identifies a [`MeshPipelineViewLayout`]. + /// + /// Used to generate all possible layouts for the mesh pipeline in [`generate_view_layouts`], + /// so special care must be taken to not add too many flags, as the number of possible layouts + /// will grow exponentially. 
+ #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] + #[repr(transparent)] + pub struct MeshPipelineViewLayoutKey: u32 { + const MULTISAMPLED = (1 << 0); + const DEPTH_PREPASS = (1 << 1); + const NORMAL_PREPASS = (1 << 2); + const MOTION_VECTOR_PREPASS = (1 << 3); + const DEFERRED_PREPASS = (1 << 4); + } +} + +impl MeshPipelineViewLayoutKey { + // The number of possible layouts + pub const COUNT: usize = Self::all().bits() as usize + 1; + + /// Builds a unique label for each layout based on the flags + pub fn label(&self) -> String { + use MeshPipelineViewLayoutKey as Key; + + format!( + "mesh_view_layout{}{}{}{}{}", + self.contains(Key::MULTISAMPLED) + .then_some("_multisampled") + .unwrap_or_default(), + self.contains(Key::DEPTH_PREPASS) + .then_some("_depth") + .unwrap_or_default(), + self.contains(Key::NORMAL_PREPASS) + .then_some("_normal") + .unwrap_or_default(), + self.contains(Key::MOTION_VECTOR_PREPASS) + .then_some("_motion") + .unwrap_or_default(), + self.contains(Key::DEFERRED_PREPASS) + .then_some("_deferred") + .unwrap_or_default(), + ) + } +} + +impl From for MeshPipelineViewLayoutKey { + fn from(value: MeshPipelineKey) -> Self { + let mut result = MeshPipelineViewLayoutKey::empty(); + + if value.msaa_samples() > 1 { + result |= MeshPipelineViewLayoutKey::MULTISAMPLED; + } + if value.contains(MeshPipelineKey::DEPTH_PREPASS) { + result |= MeshPipelineViewLayoutKey::DEPTH_PREPASS; + } + if value.contains(MeshPipelineKey::NORMAL_PREPASS) { + result |= MeshPipelineViewLayoutKey::NORMAL_PREPASS; + } + if value.contains(MeshPipelineKey::MOTION_VECTOR_PREPASS) { + result |= MeshPipelineViewLayoutKey::MOTION_VECTOR_PREPASS; + } + if value.contains(MeshPipelineKey::DEFERRED_PREPASS) { + result |= MeshPipelineViewLayoutKey::DEFERRED_PREPASS; + } + + result + } +} + +impl From for MeshPipelineViewLayoutKey { + fn from(value: Msaa) -> Self { + let mut result = MeshPipelineViewLayoutKey::empty(); + + if value.samples() > 1 { + result |= MeshPipelineViewLayoutKey::MULTISAMPLED; + } + + result + } +} + +impl From> for MeshPipelineViewLayoutKey { + fn from(value: Option<&ViewPrepassTextures>) -> Self { + let mut result = MeshPipelineViewLayoutKey::empty(); + + if let Some(prepass_textures) = value { + if prepass_textures.depth.is_some() { + result |= MeshPipelineViewLayoutKey::DEPTH_PREPASS; + } + if prepass_textures.normal.is_some() { + result |= MeshPipelineViewLayoutKey::NORMAL_PREPASS; + } + if prepass_textures.motion_vectors.is_some() { + result |= MeshPipelineViewLayoutKey::MOTION_VECTOR_PREPASS; + } + if prepass_textures.deferred.is_some() { + result |= MeshPipelineViewLayoutKey::DEFERRED_PREPASS; + } + } + + result + } +} + +/// Returns the appropriate bind group layout vec based on the parameters +fn layout_entries( + clustered_forward_buffer_binding_type: BufferBindingType, + layout_key: MeshPipelineViewLayoutKey, +) -> Vec { + let mut entries = vec![ + // View + BindGroupLayoutEntry { + binding: 0, + visibility: ShaderStages::VERTEX | ShaderStages::FRAGMENT, + ty: BindingType::Buffer { + ty: BufferBindingType::Uniform, + has_dynamic_offset: true, + min_binding_size: Some(ViewUniform::min_size()), + }, + count: None, + }, + // Lights + BindGroupLayoutEntry { + binding: 1, + visibility: ShaderStages::FRAGMENT, + ty: BindingType::Buffer { + ty: BufferBindingType::Uniform, + has_dynamic_offset: true, + min_binding_size: Some(GpuLights::min_size()), + }, + count: None, + }, + // Point Shadow Texture Cube Array + BindGroupLayoutEntry { + binding: 2, + visibility: 
ShaderStages::FRAGMENT, + ty: BindingType::Texture { + multisampled: false, + sample_type: TextureSampleType::Depth, + #[cfg(any(not(feature = "webgl"), not(target_arch = "wasm32")))] + view_dimension: TextureViewDimension::CubeArray, + #[cfg(all(feature = "webgl", target_arch = "wasm32"))] + view_dimension: TextureViewDimension::Cube, + }, + count: None, + }, + // Point Shadow Texture Array Sampler + BindGroupLayoutEntry { + binding: 3, + visibility: ShaderStages::FRAGMENT, + ty: BindingType::Sampler(SamplerBindingType::Comparison), + count: None, + }, + // Directional Shadow Texture Array + BindGroupLayoutEntry { + binding: 4, + visibility: ShaderStages::FRAGMENT, + ty: BindingType::Texture { + multisampled: false, + sample_type: TextureSampleType::Depth, + #[cfg(any(not(feature = "webgl"), not(target_arch = "wasm32")))] + view_dimension: TextureViewDimension::D2Array, + #[cfg(all(feature = "webgl", target_arch = "wasm32"))] + view_dimension: TextureViewDimension::D2, + }, + count: None, + }, + // Directional Shadow Texture Array Sampler + BindGroupLayoutEntry { + binding: 5, + visibility: ShaderStages::FRAGMENT, + ty: BindingType::Sampler(SamplerBindingType::Comparison), + count: None, + }, + // PointLights + BindGroupLayoutEntry { + binding: 6, + visibility: ShaderStages::FRAGMENT, + ty: BindingType::Buffer { + ty: clustered_forward_buffer_binding_type, + has_dynamic_offset: false, + min_binding_size: Some(GpuPointLights::min_size( + clustered_forward_buffer_binding_type, + )), + }, + count: None, + }, + // ClusteredLightIndexLists + BindGroupLayoutEntry { + binding: 7, + visibility: ShaderStages::FRAGMENT, + ty: BindingType::Buffer { + ty: clustered_forward_buffer_binding_type, + has_dynamic_offset: false, + min_binding_size: Some(ViewClusterBindings::min_size_cluster_light_index_lists( + clustered_forward_buffer_binding_type, + )), + }, + count: None, + }, + // ClusterOffsetsAndCounts + BindGroupLayoutEntry { + binding: 8, + visibility: ShaderStages::FRAGMENT, + ty: BindingType::Buffer { + ty: clustered_forward_buffer_binding_type, + has_dynamic_offset: false, + min_binding_size: Some(ViewClusterBindings::min_size_cluster_offsets_and_counts( + clustered_forward_buffer_binding_type, + )), + }, + count: None, + }, + // Globals + BindGroupLayoutEntry { + binding: 9, + visibility: ShaderStages::VERTEX_FRAGMENT, + ty: BindingType::Buffer { + ty: BufferBindingType::Uniform, + has_dynamic_offset: false, + min_binding_size: Some(GlobalsUniform::min_size()), + }, + count: None, + }, + // Fog + BindGroupLayoutEntry { + binding: 10, + visibility: ShaderStages::FRAGMENT, + ty: BindingType::Buffer { + ty: BufferBindingType::Uniform, + has_dynamic_offset: true, + min_binding_size: Some(GpuFog::min_size()), + }, + count: None, + }, + // Screen space ambient occlusion texture + BindGroupLayoutEntry { + binding: 11, + visibility: ShaderStages::FRAGMENT, + ty: BindingType::Texture { + multisampled: false, + sample_type: TextureSampleType::Float { filterable: false }, + view_dimension: TextureViewDimension::D2, + }, + count: None, + }, + ]; + + // EnvironmentMapLight + let environment_map_entries = environment_map::get_bind_group_layout_entries([12, 13, 14]); + entries.extend_from_slice(&environment_map_entries); + + // Tonemapping + let tonemapping_lut_entries = get_lut_bind_group_layout_entries([15, 16]); + entries.extend_from_slice(&tonemapping_lut_entries); + + if cfg!(any(not(feature = "webgl"), not(target_arch = "wasm32"))) + || (cfg!(all(feature = "webgl", target_arch = "wasm32")) + && 
!layout_key.contains(MeshPipelineViewLayoutKey::MULTISAMPLED)) + { + entries.extend_from_slice(&prepass::get_bind_group_layout_entries( + [17, 18, 19, 20], + layout_key, + )); + } + + entries +} + +/// Generates all possible view layouts for the mesh pipeline, based on all combinations of +/// [`MeshPipelineViewLayoutKey`] flags. +pub fn generate_view_layouts( + render_device: &RenderDevice, + clustered_forward_buffer_binding_type: BufferBindingType, +) -> [MeshPipelineViewLayout; MeshPipelineViewLayoutKey::COUNT] { + array::from_fn(|i| { + let key = MeshPipelineViewLayoutKey::from_bits_truncate(i as u32); + let entries = layout_entries(clustered_forward_buffer_binding_type, key); + + #[cfg(debug_assertions)] + let texture_count: usize = entries + .iter() + .filter(|entry| matches!(entry.ty, BindingType::Texture { .. })) + .count(); + + MeshPipelineViewLayout { + bind_group_layout: render_device.create_bind_group_layout(&BindGroupLayoutDescriptor { + label: Some(key.label().as_str()), + entries: &entries, + }), + #[cfg(debug_assertions)] + texture_count, + } + }) +} + +#[derive(Component)] +pub struct MeshViewBindGroup { + pub value: BindGroup, +} + +#[allow(clippy::too_many_arguments)] +pub fn prepare_mesh_view_bind_groups( + mut commands: Commands, + render_device: Res, + mesh_pipeline: Res, + shadow_samplers: Res, + light_meta: Res, + global_light_meta: Res, + fog_meta: Res, + view_uniforms: Res, + views: Query<( + Entity, + &ViewShadowBindings, + &ViewClusterBindings, + Option<&ScreenSpaceAmbientOcclusionTextures>, + Option<&ViewPrepassTextures>, + Option<&EnvironmentMapLight>, + &Tonemapping, + )>, + (images, mut fallback_images, fallback_cubemap): ( + Res>, + FallbackImageMsaa, + Res, + ), + msaa: Res, + globals_buffer: Res, + tonemapping_luts: Res, +) { + if let ( + Some(view_binding), + Some(light_binding), + Some(point_light_binding), + Some(globals), + Some(fog_binding), + ) = ( + view_uniforms.uniforms.binding(), + light_meta.view_gpu_lights.binding(), + global_light_meta.gpu_point_lights.binding(), + globals_buffer.buffer.binding(), + fog_meta.gpu_fogs.binding(), + ) { + for ( + entity, + view_shadow_bindings, + view_cluster_bindings, + ssao_textures, + prepass_textures, + environment_map, + tonemapping, + ) in &views + { + let fallback_ssao = fallback_images + .image_for_samplecount(1, TextureFormat::bevy_default()) + .texture_view + .clone(); + + let layout = &mesh_pipeline.get_view_layout( + MeshPipelineViewLayoutKey::from(*msaa) + | MeshPipelineViewLayoutKey::from(prepass_textures), + ); + + let mut entries = vec![ + BindGroupEntry { + binding: 0, + resource: view_binding.clone(), + }, + BindGroupEntry { + binding: 1, + resource: light_binding.clone(), + }, + BindGroupEntry { + binding: 2, + resource: BindingResource::TextureView( + &view_shadow_bindings.point_light_depth_texture_view, + ), + }, + BindGroupEntry { + binding: 3, + resource: BindingResource::Sampler(&shadow_samplers.point_light_sampler), + }, + BindGroupEntry { + binding: 4, + resource: BindingResource::TextureView( + &view_shadow_bindings.directional_light_depth_texture_view, + ), + }, + BindGroupEntry { + binding: 5, + resource: BindingResource::Sampler(&shadow_samplers.directional_light_sampler), + }, + BindGroupEntry { + binding: 6, + resource: point_light_binding.clone(), + }, + BindGroupEntry { + binding: 7, + resource: view_cluster_bindings.light_index_lists_binding().unwrap(), + }, + BindGroupEntry { + binding: 8, + resource: view_cluster_bindings.offsets_and_counts_binding().unwrap(), + }, + 
BindGroupEntry { + binding: 9, + resource: globals.clone(), + }, + BindGroupEntry { + binding: 10, + resource: fog_binding.clone(), + }, + BindGroupEntry { + binding: 11, + resource: BindingResource::TextureView( + ssao_textures + .map(|t| &t.screen_space_ambient_occlusion_texture.default_view) + .unwrap_or(&fallback_ssao), + ), + }, + ]; + + let env_map = environment_map::get_bindings( + environment_map, + &images, + &fallback_cubemap, + [12, 13, 14], + ); + entries.extend_from_slice(&env_map); + + let tonemapping_luts = + get_lut_bindings(&images, &tonemapping_luts, tonemapping, [15, 16]); + entries.extend_from_slice(&tonemapping_luts); + + let label = Some("mesh_view_bind_group"); + + // When using WebGL, we can't have a depth texture with multisampling + let prepass_bindings = if cfg!(any(not(feature = "webgl"), not(target_arch = "wasm32"))) + || (cfg!(all(feature = "webgl", target_arch = "wasm32")) && msaa.samples() == 1) + { + Some(prepass::get_bindings(prepass_textures)) + } else { + None + }; + + // This if statement is here to make the borrow checker happy. + // Ideally we could just have `entries.extend_from_slice(&prepass_bindings.get_entries([17, 18, 19, 20]));` + // in the existing if statement above, but that either doesn't allow `prepass_bindings` to live long enough, + // as its used when creating the bind group at the end of the function, or causes a `cannot move out of` error. + if let Some(prepass_bindings) = &prepass_bindings { + entries.extend_from_slice(&prepass_bindings.get_entries([17, 18, 19, 20])); + } + + commands.entity(entity).insert(MeshViewBindGroup { + value: render_device.create_bind_group(&BindGroupDescriptor { + entries: &entries, + label, + layout, + }), + }); + } + } +} diff --git a/crates/bevy_pbr/src/render/mesh_view_bindings.wgsl b/crates/bevy_pbr/src/render/mesh_view_bindings.wgsl index f3b51ddc50abb..6d68aa305a091 100644 --- a/crates/bevy_pbr/src/render/mesh_view_bindings.wgsl +++ b/crates/bevy_pbr/src/render/mesh_view_bindings.wgsl @@ -42,12 +42,31 @@ @group(0) @binding(16) var dt_lut_sampler: sampler; #ifdef MULTISAMPLED + +#ifdef DEPTH_PREPASS @group(0) @binding(17) var depth_prepass_texture: texture_depth_multisampled_2d; +#endif // DEPTH_PREPASS +#ifdef NORMAL_PREPASS @group(0) @binding(18) var normal_prepass_texture: texture_multisampled_2d; +#endif // NORMAL_PREPASS +#ifdef MOTION_VECTOR_PREPASS @group(0) @binding(19) var motion_vector_prepass_texture: texture_multisampled_2d; -#else +#endif // MOTION_VECTOR_PREPASS + +#else // MULTISAMPLED + +#ifdef DEPTH_PREPASS @group(0) @binding(17) var depth_prepass_texture: texture_depth_2d; +#endif // DEPTH_PREPASS +#ifdef NORMAL_PREPASS @group(0) @binding(18) var normal_prepass_texture: texture_2d; +#endif // NORMAL_PREPASS +#ifdef MOTION_VECTOR_PREPASS @group(0) @binding(19) var motion_vector_prepass_texture: texture_2d; +#endif // MOTION_VECTOR_PREPASS + +#endif // MULTISAMPLED + +#ifdef DEFERRED_PREPASS @group(0) @binding(20) var deferred_prepass_texture: texture_2d; -#endif +#endif // DEFERRED_PREPASS diff --git a/crates/bevy_pbr/src/render/mod.rs b/crates/bevy_pbr/src/render/mod.rs index b9d0d239c3874..7efffc05681f8 100644 --- a/crates/bevy_pbr/src/render/mod.rs +++ b/crates/bevy_pbr/src/render/mod.rs @@ -2,6 +2,7 @@ mod fog; mod light; pub(crate) mod mesh; mod mesh_bindings; +mod mesh_view_bindings; mod morph; mod skin; @@ -9,4 +10,5 @@ pub use fog::*; pub use light::*; pub use mesh::*; pub use mesh_bindings::MeshLayouts; +pub use mesh_view_bindings::*; pub use skin::{extract_skins, 
prepare_skins, SkinIndex, SkinUniform, MAX_JOINTS}; From 61bad4eb5704b6859d32c69c3c45c24e77372c10 Mon Sep 17 00:00:00 2001 From: robtfm <50659922+robtfm@users.noreply.github.com> Date: Sat, 21 Oct 2023 12:51:58 +0100 Subject: [PATCH 45/63] update shader imports (#10180) # Objective - bump naga_oil to 0.10 - update shader imports to use rusty syntax ## Migration Guide naga_oil 0.10 reworks the import mechanism to support more syntax to make it more rusty, and test for item use before importing to determine which imports are modules and which are items, which allows: - use rust-style imports ``` #import bevy_pbr::{ pbr_functions::{alpha_discard as discard, apply_pbr_lighting}, mesh_bindings, } ``` - import partial paths: ``` #import part::of::path ... path::remainder::function(); ``` which will call to `part::of::path::remainder::function` - use fully qualified paths without importing: ``` // #import bevy_pbr::pbr_functions bevy_pbr::pbr_functions::pbr() ``` - use imported items without qualifying ``` #import bevy_pbr::pbr_functions::pbr // for backwards compatibility the old style is still supported: // #import bevy_pbr::pbr_functions pbr ... pbr() ``` - allows most imported items to end with `_` and numbers (naga_oil#30). still doesn't allow struct members to end with `_` or numbers but it's progress. - the vast majority of existing shader code will work without changes, but will emit "deprecated" warnings for old-style imports. these can be suppressed with the `allow-deprecated` feature. - partly breaks overrides (as far as i'm aware nobody uses these yet) - now overrides will only be applied if the overriding module is added as an additional import in the arguments to `Composer::make_naga_module` or `Composer::add_composable_module`. this is necessary to support determining whether imports are modules or items. 
--- assets/shaders/animate_shader.wgsl | 6 ++- assets/shaders/array_texture.wgsl | 12 +++-- assets/shaders/cubemap_unlit.wgsl | 2 +- assets/shaders/custom_gltf_2d.wgsl | 7 +-- assets/shaders/custom_material.wgsl | 4 +- .../custom_material_screenspace_texture.wgsl | 8 +-- assets/shaders/custom_vertex_attribute.wgsl | 3 +- assets/shaders/extended_material.wgsl | 18 ++++--- assets/shaders/fallback_image_test.wgsl | 4 +- assets/shaders/instancing.wgsl | 3 +- assets/shaders/line_material.wgsl | 2 +- assets/shaders/post_processing.wgsl | 4 +- assets/shaders/shader_defs.wgsl | 2 +- assets/shaders/show_prepass.wgsl | 9 ++-- assets/shaders/texture_binding_array.wgsl | 2 +- assets/shaders/tonemapping_test_patterns.wgsl | 13 ++--- crates/bevy_core_pipeline/src/blit/blit.wgsl | 2 +- .../bevy_core_pipeline/src/bloom/bloom.wgsl | 2 - .../robust_contrast_adaptive_sharpening.wgsl | 2 +- .../deferred/copy_deferred_lighting_id.wgsl | 3 +- crates/bevy_core_pipeline/src/fxaa/fxaa.wgsl | 2 +- .../bevy_core_pipeline/src/skybox/skybox.wgsl | 4 +- crates/bevy_core_pipeline/src/taa/taa.wgsl | 2 - .../src/tonemapping/tonemapping.wgsl | 12 ++--- .../src/tonemapping/tonemapping_shared.wgsl | 2 +- crates/bevy_gizmos/src/lines.wgsl | 2 +- crates/bevy_pbr/Cargo.toml | 2 +- .../src/deferred/deferred_lighting.wgsl | 23 ++++----- .../src/deferred/pbr_deferred_functions.wgsl | 19 +++---- .../src/deferred/pbr_deferred_types.wgsl | 7 ++- .../src/environment_map/environment_map.wgsl | 2 +- crates/bevy_pbr/src/prepass/prepass.wgsl | 50 ++++++++++--------- .../src/prepass/prepass_bindings.wgsl | 2 - .../src/render/clustered_forward.wgsl | 6 ++- crates/bevy_pbr/src/render/fog.wgsl | 6 ++- crates/bevy_pbr/src/render/mesh.wgsl | 23 +++++---- crates/bevy_pbr/src/render/mesh_bindings.wgsl | 2 +- .../bevy_pbr/src/render/mesh_functions.wgsl | 14 ++++-- .../src/render/mesh_view_bindings.wgsl | 6 ++- .../bevy_pbr/src/render/mesh_view_types.wgsl | 3 -- crates/bevy_pbr/src/render/morph.wgsl | 11 +--- .../bevy_pbr/src/render/parallax_mapping.wgsl | 2 +- crates/bevy_pbr/src/render/pbr.wgsl | 20 +++++--- crates/bevy_pbr/src/render/pbr_ambient.wgsl | 6 ++- crates/bevy_pbr/src/render/pbr_bindings.wgsl | 2 +- crates/bevy_pbr/src/render/pbr_fragment.wgsl | 26 +++++----- crates/bevy_pbr/src/render/pbr_functions.wgsl | 29 ++++++----- crates/bevy_pbr/src/render/pbr_lighting.wgsl | 10 ++-- crates/bevy_pbr/src/render/pbr_prepass.wgsl | 33 ++++++------ .../src/render/pbr_prepass_functions.wgsl | 13 ++--- .../bevy_pbr/src/render/shadow_sampling.wgsl | 6 ++- crates/bevy_pbr/src/render/shadows.wgsl | 10 ++-- crates/bevy_pbr/src/render/skinning.wgsl | 2 +- crates/bevy_pbr/src/render/wireframe.wgsl | 3 +- crates/bevy_pbr/src/ssao/gtao.wgsl | 12 +++-- crates/bevy_pbr/src/ssao/gtao_utils.wgsl | 2 +- .../bevy_pbr/src/ssao/preprocess_depth.wgsl | 2 +- crates/bevy_pbr/src/ssao/spatial_denoise.wgsl | 2 +- crates/bevy_render/Cargo.toml | 2 +- .../src/mesh2d/color_material.wgsl | 9 ++-- crates/bevy_sprite/src/mesh2d/mesh2d.wgsl | 11 ++-- .../src/mesh2d/mesh2d_bindings.wgsl | 2 +- .../src/mesh2d/mesh2d_functions.wgsl | 12 +++-- .../src/mesh2d/mesh2d_view_bindings.wgsl | 4 +- crates/bevy_sprite/src/render/sprite.wgsl | 8 +-- crates/bevy_ui/src/render/ui.wgsl | 2 +- examples/2d/mesh2d_manual.rs | 7 ++- 67 files changed, 290 insertions(+), 255 deletions(-) diff --git a/assets/shaders/animate_shader.wgsl b/assets/shaders/animate_shader.wgsl index addb1dee89b34..0e369674ac38e 100644 --- a/assets/shaders/animate_shader.wgsl +++ b/assets/shaders/animate_shader.wgsl @@ -1,6 
+1,8 @@ // The time since startup data is in the globals binding which is part of the mesh_view_bindings import -#import bevy_pbr::mesh_view_bindings globals -#import bevy_pbr::forward_io VertexOutput +#import bevy_pbr::{ + mesh_view_bindings::globals, + forward_io::VertexOutput, +} fn oklab_to_linear_srgb(c: vec3) -> vec3 { let L = c.x; diff --git a/assets/shaders/array_texture.wgsl b/assets/shaders/array_texture.wgsl index dd55487a697eb..24eaa1b549648 100644 --- a/assets/shaders/array_texture.wgsl +++ b/assets/shaders/array_texture.wgsl @@ -1,8 +1,10 @@ -#import bevy_pbr::forward_io VertexOutput -#import bevy_pbr::mesh_view_bindings view -#import bevy_pbr::pbr_types STANDARD_MATERIAL_FLAGS_DOUBLE_SIDED_BIT, PbrInput, pbr_input_new -#import bevy_core_pipeline::tonemapping tone_mapping -#import bevy_pbr::pbr_functions as fns +#import bevy_pbr::{ + forward_io::VertexOutput, + mesh_view_bindings::view, + pbr_types::{STANDARD_MATERIAL_FLAGS_DOUBLE_SIDED_BIT, PbrInput, pbr_input_new}, + pbr_functions as fns, +} +#import bevy_core_pipeline::tonemapping::tone_mapping @group(1) @binding(0) var my_array_texture: texture_2d_array; @group(1) @binding(1) var my_array_texture_sampler: sampler; diff --git a/assets/shaders/cubemap_unlit.wgsl b/assets/shaders/cubemap_unlit.wgsl index 56a5b005008e6..425eb2f5f2476 100644 --- a/assets/shaders/cubemap_unlit.wgsl +++ b/assets/shaders/cubemap_unlit.wgsl @@ -1,4 +1,4 @@ -#import bevy_pbr::forward_io VertexOutput +#import bevy_pbr::forward_io::VertexOutput #ifdef CUBEMAP_ARRAY @group(1) @binding(0) var base_color_texture: texture_cube_array; diff --git a/assets/shaders/custom_gltf_2d.wgsl b/assets/shaders/custom_gltf_2d.wgsl index 5e0a908c875d4..1ea793ad35696 100644 --- a/assets/shaders/custom_gltf_2d.wgsl +++ b/assets/shaders/custom_gltf_2d.wgsl @@ -1,6 +1,7 @@ -#import bevy_sprite::mesh2d_view_bindings globals -#import bevy_sprite::mesh2d_bindings mesh -#import bevy_sprite::mesh2d_functions get_model_matrix, mesh2d_position_local_to_clip +#import bevy_sprite::{ + mesh2d_view_bindings::globals, + mesh2d_functions::{get_model_matrix, mesh2d_position_local_to_clip}, +} struct Vertex { @builtin(instance_index) instance_index: u32, diff --git a/assets/shaders/custom_material.wgsl b/assets/shaders/custom_material.wgsl index b1c8d75ed9ff5..90322438e68d2 100644 --- a/assets/shaders/custom_material.wgsl +++ b/assets/shaders/custom_material.wgsl @@ -1,6 +1,6 @@ -#import bevy_pbr::forward_io VertexOutput +#import bevy_pbr::forward_io::VertexOutput // we can import items from shader modules in the assets folder with a quoted path -#import "shaders/custom_material_import.wgsl" COLOR_MULTIPLIER +#import "shaders/custom_material_import.wgsl"::COLOR_MULTIPLIER struct CustomMaterial { color: vec4, diff --git a/assets/shaders/custom_material_screenspace_texture.wgsl b/assets/shaders/custom_material_screenspace_texture.wgsl index 50fdfdd2b10c3..a4afd6422530b 100644 --- a/assets/shaders/custom_material_screenspace_texture.wgsl +++ b/assets/shaders/custom_material_screenspace_texture.wgsl @@ -1,6 +1,8 @@ -#import bevy_pbr::mesh_view_bindings view -#import bevy_pbr::forward_io VertexOutput -#import bevy_pbr::utils coords_to_viewport_uv +#import bevy_pbr::{ + mesh_view_bindings::view, + forward_io::VertexOutput, + utils::coords_to_viewport_uv, +} @group(1) @binding(0) var texture: texture_2d; @group(1) @binding(1) var texture_sampler: sampler; diff --git a/assets/shaders/custom_vertex_attribute.wgsl b/assets/shaders/custom_vertex_attribute.wgsl index 01f6af42c4cb8..d17246f89abf0 
100644 --- a/assets/shaders/custom_vertex_attribute.wgsl +++ b/assets/shaders/custom_vertex_attribute.wgsl @@ -1,5 +1,4 @@ -#import bevy_pbr::mesh_bindings mesh -#import bevy_pbr::mesh_functions get_model_matrix, mesh_position_local_to_clip +#import bevy_pbr::mesh_functions::{get_model_matrix, mesh_position_local_to_clip} struct CustomMaterial { color: vec4, diff --git a/assets/shaders/extended_material.wgsl b/assets/shaders/extended_material.wgsl index c39848c77d2cf..c6fd8aea9dd79 100644 --- a/assets/shaders/extended_material.wgsl +++ b/assets/shaders/extended_material.wgsl @@ -1,12 +1,18 @@ -#import bevy_pbr::pbr_fragment pbr_input_from_standard_material -#import bevy_pbr::pbr_functions alpha_discard +#import bevy_pbr::{ + pbr_fragment::pbr_input_from_standard_material, + pbr_functions::alpha_discard, +} #ifdef PREPASS_PIPELINE -#import bevy_pbr::prepass_io VertexOutput, FragmentOutput -#import bevy_pbr::pbr_deferred_functions deferred_output +#import bevy_pbr::{ + prepass_io::{VertexOutput, FragmentOutput}, + pbr_deferred_functions::deferred_output, +} #else -#import bevy_pbr::forward_io VertexOutput, FragmentOutput -#import bevy_pbr::pbr_functions apply_pbr_lighting, main_pass_post_lighting_processing +#import bevy_pbr::{ + forward_io::{VertexOutput, FragmentOutput}, + pbr_functions::{apply_pbr_lighting, main_pass_post_lighting_processing}, +} #endif struct MyExtendedMaterial { diff --git a/assets/shaders/fallback_image_test.wgsl b/assets/shaders/fallback_image_test.wgsl index f51d9961cde7a..59124786580ab 100644 --- a/assets/shaders/fallback_image_test.wgsl +++ b/assets/shaders/fallback_image_test.wgsl @@ -1,6 +1,4 @@ -#import bevy_pbr::mesh_view_bindings -#import bevy_pbr::mesh_bindings -#import bevy_pbr::forward_io VertexOutput +#import bevy_pbr::forward_io::VertexOutput @group(1) @binding(0) var test_texture_1d: texture_1d; @group(1) @binding(1) var test_texture_1d_sampler: sampler; diff --git a/assets/shaders/instancing.wgsl b/assets/shaders/instancing.wgsl index 52e7d9696af81..5491921a6e151 100644 --- a/assets/shaders/instancing.wgsl +++ b/assets/shaders/instancing.wgsl @@ -1,5 +1,4 @@ -#import bevy_pbr::mesh_functions get_model_matrix, mesh_position_local_to_clip -#import bevy_pbr::mesh_bindings mesh +#import bevy_pbr::mesh_functions::{get_model_matrix, mesh_position_local_to_clip} struct Vertex { @location(0) position: vec3, diff --git a/assets/shaders/line_material.wgsl b/assets/shaders/line_material.wgsl index ed06a27cd069f..e2ae15c1312c0 100644 --- a/assets/shaders/line_material.wgsl +++ b/assets/shaders/line_material.wgsl @@ -1,4 +1,4 @@ -#import bevy_pbr::forward_io VertexOutput +#import bevy_pbr::forward_io::VertexOutput struct LineMaterial { color: vec4, diff --git a/assets/shaders/post_processing.wgsl b/assets/shaders/post_processing.wgsl index 398c08d320893..37bc40c6fea7e 100644 --- a/assets/shaders/post_processing.wgsl +++ b/assets/shaders/post_processing.wgsl @@ -1,7 +1,5 @@ // This shader computes the chromatic aberration effect -#import bevy_pbr::utils - // Since post processing is a fullscreen effect, we use the fullscreen vertex shader provided by bevy. // This will import a vertex shader that renders a single fullscreen triangle. // @@ -20,7 +18,7 @@ // As you can see, the triangle ends up bigger than the screen. // // You don't need to worry about this too much since bevy will compute the correct UVs for you. 
-#import bevy_core_pipeline::fullscreen_vertex_shader FullscreenVertexOutput +#import bevy_core_pipeline::fullscreen_vertex_shader::FullscreenVertexOutput @group(0) @binding(0) var screen_texture: texture_2d; @group(0) @binding(1) var texture_sampler: sampler; diff --git a/assets/shaders/shader_defs.wgsl b/assets/shaders/shader_defs.wgsl index 0586b560c78ff..7b98daca05962 100644 --- a/assets/shaders/shader_defs.wgsl +++ b/assets/shaders/shader_defs.wgsl @@ -1,4 +1,4 @@ -#import bevy_pbr::forward_io VertexOutput +#import bevy_pbr::forward_io::VertexOutput struct CustomMaterial { color: vec4, diff --git a/assets/shaders/show_prepass.wgsl b/assets/shaders/show_prepass.wgsl index 881c5724d27b4..e80ea5c39d171 100644 --- a/assets/shaders/show_prepass.wgsl +++ b/assets/shaders/show_prepass.wgsl @@ -1,7 +1,8 @@ -#import bevy_pbr::mesh_types -#import bevy_pbr::mesh_view_bindings globals -#import bevy_pbr::prepass_utils -#import bevy_pbr::forward_io VertexOutput +#import bevy_pbr::{ + mesh_view_bindings::globals, + prepass_utils, + forward_io::VertexOutput, +} struct ShowPrepassSettings { show_depth: u32, diff --git a/assets/shaders/texture_binding_array.wgsl b/assets/shaders/texture_binding_array.wgsl index ee92b85b53231..440e3c6155700 100644 --- a/assets/shaders/texture_binding_array.wgsl +++ b/assets/shaders/texture_binding_array.wgsl @@ -1,4 +1,4 @@ -#import bevy_pbr::forward_io VertexOutput +#import bevy_pbr::forward_io::VertexOutput @group(1) @binding(0) var textures: binding_array>; @group(1) @binding(1) var nearest_sampler: sampler; diff --git a/assets/shaders/tonemapping_test_patterns.wgsl b/assets/shaders/tonemapping_test_patterns.wgsl index 7e64d699bef22..891a66f3a1f45 100644 --- a/assets/shaders/tonemapping_test_patterns.wgsl +++ b/assets/shaders/tonemapping_test_patterns.wgsl @@ -1,10 +1,11 @@ -#import bevy_pbr::mesh_view_bindings -#import bevy_pbr::mesh_bindings -#import bevy_pbr::forward_io VertexOutput -#import bevy_pbr::utils PI +#import bevy_pbr::{ + mesh_view_bindings, + forward_io::VertexOutput, + utils::PI, +} #ifdef TONEMAP_IN_SHADER -#import bevy_core_pipeline::tonemapping tone_mapping +#import bevy_core_pipeline::tonemapping::tone_mapping #endif // Sweep across hues on y axis with value from 0.0 to +15EV across x axis @@ -55,7 +56,7 @@ fn fragment( } var color = vec4(out, 1.0); #ifdef TONEMAP_IN_SHADER - color = tone_mapping(color, bevy_pbr::mesh_view_bindings::view.color_grading); + color = tone_mapping(color, mesh_view_bindings::view.color_grading); #endif return color; } diff --git a/crates/bevy_core_pipeline/src/blit/blit.wgsl b/crates/bevy_core_pipeline/src/blit/blit.wgsl index 5ee6c1a6f92cf..82521bf312154 100644 --- a/crates/bevy_core_pipeline/src/blit/blit.wgsl +++ b/crates/bevy_core_pipeline/src/blit/blit.wgsl @@ -1,4 +1,4 @@ -#import bevy_core_pipeline::fullscreen_vertex_shader FullscreenVertexOutput +#import bevy_core_pipeline::fullscreen_vertex_shader::FullscreenVertexOutput @group(0) @binding(0) var in_texture: texture_2d; @group(0) @binding(1) var in_sampler: sampler; diff --git a/crates/bevy_core_pipeline/src/bloom/bloom.wgsl b/crates/bevy_core_pipeline/src/bloom/bloom.wgsl index 3666f105b1180..8234b72213690 100644 --- a/crates/bevy_core_pipeline/src/bloom/bloom.wgsl +++ b/crates/bevy_core_pipeline/src/bloom/bloom.wgsl @@ -6,8 +6,6 @@ // * [COD] - Next Generation Post Processing in Call of Duty - http://www.iryoku.com/next-generation-post-processing-in-call-of-duty-advanced-warfare // * [PBB] - Physically Based Bloom - 
https://learnopengl.com/Guest-Articles/2022/Phys.-Based-Bloom -#import bevy_core_pipeline::fullscreen_vertex_shader - struct BloomUniforms { threshold_precomputations: vec4, viewport: vec4, diff --git a/crates/bevy_core_pipeline/src/contrast_adaptive_sharpening/robust_contrast_adaptive_sharpening.wgsl b/crates/bevy_core_pipeline/src/contrast_adaptive_sharpening/robust_contrast_adaptive_sharpening.wgsl index 68e299cfa1f9a..252d97c9d6c3e 100644 --- a/crates/bevy_core_pipeline/src/contrast_adaptive_sharpening/robust_contrast_adaptive_sharpening.wgsl +++ b/crates/bevy_core_pipeline/src/contrast_adaptive_sharpening/robust_contrast_adaptive_sharpening.wgsl @@ -17,7 +17,7 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. -#import bevy_core_pipeline::fullscreen_vertex_shader FullscreenVertexOutput +#import bevy_core_pipeline::fullscreen_vertex_shader::FullscreenVertexOutput struct CASUniforms { sharpness: f32, diff --git a/crates/bevy_core_pipeline/src/deferred/copy_deferred_lighting_id.wgsl b/crates/bevy_core_pipeline/src/deferred/copy_deferred_lighting_id.wgsl index 2fc7b1d86748d..25acf47068222 100644 --- a/crates/bevy_core_pipeline/src/deferred/copy_deferred_lighting_id.wgsl +++ b/crates/bevy_core_pipeline/src/deferred/copy_deferred_lighting_id.wgsl @@ -1,5 +1,4 @@ -#import bevy_pbr::utils -#import bevy_core_pipeline::fullscreen_vertex_shader FullscreenVertexOutput +#import bevy_core_pipeline::fullscreen_vertex_shader::FullscreenVertexOutput @group(0) @binding(0) var material_id_texture: texture_2d; diff --git a/crates/bevy_core_pipeline/src/fxaa/fxaa.wgsl b/crates/bevy_core_pipeline/src/fxaa/fxaa.wgsl index d2302a2267b25..2ff080de5e8e5 100644 --- a/crates/bevy_core_pipeline/src/fxaa/fxaa.wgsl +++ b/crates/bevy_core_pipeline/src/fxaa/fxaa.wgsl @@ -6,7 +6,7 @@ // // Tweaks by mrDIMAS - https://github.com/FyroxEngine/Fyrox/blob/master/src/renderer/shaders/fxaa_fs.glsl -#import bevy_core_pipeline::fullscreen_vertex_shader FullscreenVertexOutput +#import bevy_core_pipeline::fullscreen_vertex_shader::FullscreenVertexOutput @group(0) @binding(0) var screenTexture: texture_2d; @group(0) @binding(1) var samp: sampler; diff --git a/crates/bevy_core_pipeline/src/skybox/skybox.wgsl b/crates/bevy_core_pipeline/src/skybox/skybox.wgsl index cf22c2b2970f2..7da40da7937d4 100644 --- a/crates/bevy_core_pipeline/src/skybox/skybox.wgsl +++ b/crates/bevy_core_pipeline/src/skybox/skybox.wgsl @@ -1,5 +1,5 @@ -#import bevy_render::view View -#import bevy_pbr::utils coords_to_viewport_uv +#import bevy_render::view::View +#import bevy_pbr::utils::coords_to_viewport_uv @group(0) @binding(0) var skybox: texture_cube; @group(0) @binding(1) var skybox_sampler: sampler; diff --git a/crates/bevy_core_pipeline/src/taa/taa.wgsl b/crates/bevy_core_pipeline/src/taa/taa.wgsl index 53be6b02f0ecf..43414b63121c0 100644 --- a/crates/bevy_core_pipeline/src/taa/taa.wgsl +++ b/crates/bevy_core_pipeline/src/taa/taa.wgsl @@ -10,8 +10,6 @@ const DEFAULT_HISTORY_BLEND_RATE: f32 = 0.1; // Default blend rate to use when no confidence in history const MIN_HISTORY_BLEND_RATE: f32 = 0.015; // Minimum blend rate allowed, to ensure at least some of the current sample is used -#import bevy_core_pipeline::fullscreen_vertex_shader - @group(0) @binding(0) var view_target: texture_2d; @group(0) @binding(1) var history: texture_2d; @group(0) @binding(2) var motion_vectors: texture_2d; diff --git a/crates/bevy_core_pipeline/src/tonemapping/tonemapping.wgsl 
b/crates/bevy_core_pipeline/src/tonemapping/tonemapping.wgsl index 4c73a891c81eb..a4eee79c7402a 100644 --- a/crates/bevy_core_pipeline/src/tonemapping/tonemapping.wgsl +++ b/crates/bevy_core_pipeline/src/tonemapping/tonemapping.wgsl @@ -1,8 +1,10 @@ #define TONEMAPPING_PASS -#import bevy_core_pipeline::fullscreen_vertex_shader FullscreenVertexOutput -#import bevy_render::view View -#import bevy_core_pipeline::tonemapping tone_mapping, powsafe, screen_space_dither +#import bevy_render::view::View +#import bevy_core_pipeline::{ + fullscreen_vertex_shader::FullscreenVertexOutput, + tonemapping::{tone_mapping, powsafe, screen_space_dither}, +} @group(0) @binding(0) var view: View; @@ -11,8 +13,6 @@ @group(0) @binding(3) var dt_lut_texture: texture_3d; @group(0) @binding(4) var dt_lut_sampler: sampler; -#import bevy_core_pipeline::tonemapping - @fragment fn fragment(in: FullscreenVertexOutput) -> @location(0) vec4 { let hdr_color = textureSample(hdr_texture, hdr_sampler, in.uv); @@ -21,7 +21,7 @@ fn fragment(in: FullscreenVertexOutput) -> @location(0) vec4 { #ifdef DEBAND_DITHER output_rgb = powsafe(output_rgb.rgb, 1.0 / 2.2); - output_rgb = output_rgb + bevy_core_pipeline::tonemapping::screen_space_dither(in.position.xy); + output_rgb = output_rgb + screen_space_dither(in.position.xy); // This conversion back to linear space is required because our output texture format is // SRGB; the GPU will assume our output is linear and will apply an SRGB conversion. output_rgb = powsafe(output_rgb.rgb, 2.2); diff --git a/crates/bevy_core_pipeline/src/tonemapping/tonemapping_shared.wgsl b/crates/bevy_core_pipeline/src/tonemapping/tonemapping_shared.wgsl index b8ca8f0d68f81..92da49b8242b8 100644 --- a/crates/bevy_core_pipeline/src/tonemapping/tonemapping_shared.wgsl +++ b/crates/bevy_core_pipeline/src/tonemapping/tonemapping_shared.wgsl @@ -1,6 +1,6 @@ #define_import_path bevy_core_pipeline::tonemapping -#import bevy_render::view View, ColorGrading +#import bevy_render::view::ColorGrading // hack !! 
not sure what to do with this #ifdef TONEMAPPING_PASS diff --git a/crates/bevy_gizmos/src/lines.wgsl b/crates/bevy_gizmos/src/lines.wgsl index 377194f3f1c07..2eb6ef907aeb8 100644 --- a/crates/bevy_gizmos/src/lines.wgsl +++ b/crates/bevy_gizmos/src/lines.wgsl @@ -1,5 +1,5 @@ // TODO use common view binding -#import bevy_render::view View +#import bevy_render::view::View @group(0) @binding(0) var view: View; diff --git a/crates/bevy_pbr/Cargo.toml b/crates/bevy_pbr/Cargo.toml index c60174651dd41..dc2cdeb1235d2 100644 --- a/crates/bevy_pbr/Cargo.toml +++ b/crates/bevy_pbr/Cargo.toml @@ -31,6 +31,6 @@ fixedbitset = "0.4" # direct dependency required for derive macro bytemuck = { version = "1", features = ["derive"] } radsort = "0.1" -naga_oil = "0.9" +naga_oil = "0.10" smallvec = "1.6" thread_local = "1.0" diff --git a/crates/bevy_pbr/src/deferred/deferred_lighting.wgsl b/crates/bevy_pbr/src/deferred/deferred_lighting.wgsl index 8657bb8174ca9..2b8221a45f7bf 100644 --- a/crates/bevy_pbr/src/deferred/deferred_lighting.wgsl +++ b/crates/bevy_pbr/src/deferred/deferred_lighting.wgsl @@ -1,15 +1,14 @@ -#import bevy_pbr::prepass_utils -#import bevy_pbr::pbr_types STANDARD_MATERIAL_FLAGS_FOG_ENABLED_BIT, STANDARD_MATERIAL_FLAGS_UNLIT_BIT -#import bevy_pbr::pbr_functions as pbr_functions -#import bevy_pbr::pbr_deferred_types as deferred_types -#import bevy_pbr::pbr_deferred_functions pbr_input_from_deferred_gbuffer, unpack_unorm3x4_plus_unorm_20_ -#import bevy_pbr::mesh_view_types FOG_MODE_OFF - -#import bevy_pbr::mesh_view_bindings deferred_prepass_texture, fog, view, screen_space_ambient_occlusion_texture -#import bevy_core_pipeline::tonemapping screen_space_dither, powsafe, tone_mapping +#import bevy_pbr::{ + prepass_utils, + pbr_types::STANDARD_MATERIAL_FLAGS_UNLIT_BIT, + pbr_functions, + pbr_deferred_functions::{pbr_input_from_deferred_gbuffer, unpack_unorm3x4_plus_unorm_20_}, + mesh_view_bindings::deferred_prepass_texture, +} #ifdef SCREEN_SPACE_AMBIENT_OCCLUSION -#import bevy_pbr::gtao_utils gtao_multibounce +#import bevy_pbr::mesh_view_bindings::screen_space_ambient_occlusion_texture +#import bevy_pbr::gtao_utils::gtao_multibounce #endif struct FullscreenVertexOutput { @@ -48,10 +47,10 @@ fn fragment(in: FullscreenVertexOutput) -> @location(0) vec4 { let deferred_data = textureLoad(deferred_prepass_texture, vec2(frag_coord.xy), 0); #ifdef WEBGL2 - frag_coord.z = deferred_types::unpack_unorm3x4_plus_unorm_20_(deferred_data.b).w; + frag_coord.z = unpack_unorm3x4_plus_unorm_20_(deferred_data.b).w; #else #ifdef DEPTH_PREPASS - frag_coord.z = bevy_pbr::prepass_utils::prepass_depth(in.position, 0u); + frag_coord.z = prepass_utils::prepass_depth(in.position, 0u); #endif #endif diff --git a/crates/bevy_pbr/src/deferred/pbr_deferred_functions.wgsl b/crates/bevy_pbr/src/deferred/pbr_deferred_functions.wgsl index 058d437b19b54..1840faf9c49e0 100644 --- a/crates/bevy_pbr/src/deferred/pbr_deferred_functions.wgsl +++ b/crates/bevy_pbr/src/deferred/pbr_deferred_functions.wgsl @@ -1,16 +1,17 @@ #define_import_path bevy_pbr::pbr_deferred_functions -#import bevy_pbr::pbr_types PbrInput, standard_material_new, STANDARD_MATERIAL_FLAGS_FOG_ENABLED_BIT, STANDARD_MATERIAL_FLAGS_UNLIT_BIT -#import bevy_pbr::pbr_deferred_types as deferred_types -#import bevy_pbr::pbr_functions as pbr_functions -#import bevy_pbr::rgb9e5 as rgb9e5 -#import bevy_pbr::mesh_view_bindings as view_bindings -#import bevy_pbr::mesh_view_bindings view -#import bevy_pbr::utils octahedral_encode, octahedral_decode -#import bevy_pbr::prepass_io 
VertexOutput, FragmentOutput +#import bevy_pbr::{ + pbr_types::{PbrInput, standard_material_new, STANDARD_MATERIAL_FLAGS_UNLIT_BIT}, + pbr_deferred_types as deferred_types, + pbr_functions, + rgb9e5, + mesh_view_bindings::view, + utils::{octahedral_encode, octahedral_decode}, + prepass_io::{VertexOutput, FragmentOutput}, +} #ifdef MOTION_VECTOR_PREPASS - #import bevy_pbr::pbr_prepass_functions calculate_motion_vector + #import bevy_pbr::pbr_prepass_functions::calculate_motion_vector #endif // --------------------------- diff --git a/crates/bevy_pbr/src/deferred/pbr_deferred_types.wgsl b/crates/bevy_pbr/src/deferred/pbr_deferred_types.wgsl index c9aa026d70d51..ef39307b49c22 100644 --- a/crates/bevy_pbr/src/deferred/pbr_deferred_types.wgsl +++ b/crates/bevy_pbr/src/deferred/pbr_deferred_types.wgsl @@ -1,6 +1,9 @@ #define_import_path bevy_pbr::pbr_deferred_types -#import bevy_pbr::mesh_types MESH_FLAGS_SHADOW_RECEIVER_BIT -#import bevy_pbr::pbr_types STANDARD_MATERIAL_FLAGS_FOG_ENABLED_BIT, STANDARD_MATERIAL_FLAGS_UNLIT_BIT + +#import bevy_pbr::{ + mesh_types::MESH_FLAGS_SHADOW_RECEIVER_BIT, + pbr_types::{STANDARD_MATERIAL_FLAGS_FOG_ENABLED_BIT, STANDARD_MATERIAL_FLAGS_UNLIT_BIT}, +} // Maximum of 8 bits available const DEFERRED_FLAGS_UNLIT_BIT: u32 = 1u; diff --git a/crates/bevy_pbr/src/environment_map/environment_map.wgsl b/crates/bevy_pbr/src/environment_map/environment_map.wgsl index ed10e2d0ba894..2288d7d07c8bb 100644 --- a/crates/bevy_pbr/src/environment_map/environment_map.wgsl +++ b/crates/bevy_pbr/src/environment_map/environment_map.wgsl @@ -1,6 +1,6 @@ #define_import_path bevy_pbr::environment_map -#import bevy_pbr::mesh_view_bindings as bindings +#import bevy_pbr::mesh_view_bindings as bindings; struct EnvironmentMapLight { diffuse: vec3, diff --git a/crates/bevy_pbr/src/prepass/prepass.wgsl b/crates/bevy_pbr/src/prepass/prepass.wgsl index ac4f7683b3325..fef278324fbe4 100644 --- a/crates/bevy_pbr/src/prepass/prepass.wgsl +++ b/crates/bevy_pbr/src/prepass/prepass.wgsl @@ -1,11 +1,13 @@ -#import bevy_pbr::prepass_bindings -#import bevy_pbr::mesh_functions -#import bevy_pbr::prepass_io Vertex, VertexOutput, FragmentOutput -#import bevy_pbr::skinning -#import bevy_pbr::morph -#import bevy_pbr::mesh_bindings mesh -#import bevy_render::instance_index get_instance_index -#import bevy_pbr::mesh_view_bindings view, previous_view_proj +#import bevy_pbr::{ + prepass_bindings, + mesh_functions, + prepass_io::{Vertex, VertexOutput, FragmentOutput}, + skinning, + morph, + mesh_view_bindings::{view, previous_view_proj}, +} + +#import bevy_render::instance_index::get_instance_index #ifdef DEFERRED_PREPASS #import bevy_pbr::rgb9e5 @@ -14,18 +16,18 @@ #ifdef MORPH_TARGETS fn morph_vertex(vertex_in: Vertex) -> Vertex { var vertex = vertex_in; - let weight_count = bevy_pbr::morph::layer_count(); + let weight_count = morph::layer_count(); for (var i: u32 = 0u; i < weight_count; i ++) { - let weight = bevy_pbr::morph::weight_at(i); + let weight = morph::weight_at(i); if weight == 0.0 { continue; } - vertex.position += weight * bevy_pbr::morph::morph(vertex.index, bevy_pbr::morph::position_offset, i); + vertex.position += weight * morph::morph(vertex.index, morph::position_offset, i); #ifdef VERTEX_NORMALS - vertex.normal += weight * bevy_pbr::morph::morph(vertex.index, bevy_pbr::morph::normal_offset, i); + vertex.normal += weight * morph::morph(vertex.index, morph::normal_offset, i); #endif #ifdef VERTEX_TANGENTS - vertex.tangent += vec4(weight * bevy_pbr::morph::morph(vertex.index, 
bevy_pbr::morph::tangent_offset, i), 0.0); + vertex.tangent += vec4(weight * morph::morph(vertex.index, morph::tangent_offset, i), 0.0); #endif } return vertex; @@ -37,20 +39,20 @@ fn vertex(vertex_no_morph: Vertex) -> VertexOutput { var out: VertexOutput; #ifdef MORPH_TARGETS - var vertex = morph_vertex(vertex_no_morph); + var vertex = morph::morph_vertex(vertex_no_morph); #else var vertex = vertex_no_morph; #endif #ifdef SKINNED - var model = bevy_pbr::skinning::skin_model(vertex.joint_indices, vertex.joint_weights); + var model = skinning::skin_model(vertex.joint_indices, vertex.joint_weights); #else // SKINNED // Use vertex_no_morph.instance_index instead of vertex.instance_index to work around a wgpu dx12 bug. // See https://github.com/gfx-rs/naga/issues/2416 - var model = bevy_pbr::mesh_functions::get_model_matrix(vertex_no_morph.instance_index); + var model = mesh_functions::get_model_matrix(vertex_no_morph.instance_index); #endif // SKINNED - out.position = bevy_pbr::mesh_functions::mesh_position_local_to_clip(model, vec4(vertex.position, 1.0)); + out.position = mesh_functions::mesh_position_local_to_clip(model, vec4(vertex.position, 1.0)); #ifdef DEPTH_CLAMP_ORTHO out.clip_position_unclamped = out.position; out.position.z = min(out.position.z, 1.0); @@ -62,9 +64,9 @@ fn vertex(vertex_no_morph: Vertex) -> VertexOutput { #ifdef NORMAL_PREPASS_OR_DEFERRED_PREPASS #ifdef SKINNED - out.world_normal = bevy_pbr::skinning::skin_normals(model, vertex.normal); + out.world_normal = skinning::skin_normals(model, vertex.normal); #else // SKINNED - out.world_normal = bevy_pbr::mesh_functions::mesh_normal_local_to_world( + out.world_normal = mesh_functions::mesh_normal_local_to_world( vertex.normal, // Use vertex_no_morph.instance_index instead of vertex.instance_index to work around a wgpu dx12 bug. // See https://github.com/gfx-rs/naga/issues/2416 @@ -73,7 +75,7 @@ fn vertex(vertex_no_morph: Vertex) -> VertexOutput { #endif // SKINNED #ifdef VERTEX_TANGENTS - out.world_tangent = bevy_pbr::mesh_functions::mesh_tangent_local_to_world( + out.world_tangent = mesh_functions::mesh_tangent_local_to_world( model, vertex.tangent, // Use vertex_no_morph.instance_index instead of vertex.instance_index to work around a wgpu dx12 bug. @@ -88,14 +90,14 @@ fn vertex(vertex_no_morph: Vertex) -> VertexOutput { #endif #ifdef MOTION_VECTOR_PREPASS_OR_DEFERRED_PREPASS - out.world_position = bevy_pbr::mesh_functions::mesh_position_local_to_world(model, vec4(vertex.position, 1.0)); + out.world_position = mesh_functions::mesh_position_local_to_world(model, vec4(vertex.position, 1.0)); #endif // MOTION_VECTOR_PREPASS_OR_DEFERRED_PREPASS #ifdef MOTION_VECTOR_PREPASS // Use vertex_no_morph.instance_index instead of vertex.instance_index to work around a wgpu dx12 bug. 
// See https://github.com/gfx-rs/naga/issues/2416 - out.previous_world_position = bevy_pbr::mesh_functions::mesh_position_local_to_world( - bevy_pbr::mesh_functions::get_previous_model_matrix(vertex_no_morph.instance_index), + out.previous_world_position = mesh_functions::mesh_position_local_to_world( + mesh_functions::get_previous_model_matrix(vertex_no_morph.instance_index), vec4(vertex.position, 1.0) ); #endif // MOTION_VECTOR_PREPASS @@ -125,7 +127,7 @@ fn fragment(in: VertexOutput) -> FragmentOutput { #ifdef MOTION_VECTOR_PREPASS let clip_position_t = view.unjittered_view_proj * in.world_position; let clip_position = clip_position_t.xy / clip_position_t.w; - let previous_clip_position_t = bevy_pbr::prepass_bindings::previous_view_proj * in.previous_world_position; + let previous_clip_position_t = prepass_bindings::previous_view_proj * in.previous_world_position; let previous_clip_position = previous_clip_position_t.xy / previous_clip_position_t.w; // These motion vectors are used as offsets to UV positions and are stored // in the range -1,1 to allow offsetting from the one corner to the diff --git a/crates/bevy_pbr/src/prepass/prepass_bindings.wgsl b/crates/bevy_pbr/src/prepass/prepass_bindings.wgsl index 5f5fef362acf6..4e4e8c453f572 100644 --- a/crates/bevy_pbr/src/prepass/prepass_bindings.wgsl +++ b/crates/bevy_pbr/src/prepass/prepass_bindings.wgsl @@ -1,9 +1,7 @@ #define_import_path bevy_pbr::prepass_bindings -#import bevy_pbr::mesh_types #ifdef MOTION_VECTOR_PREPASS @group(0) @binding(2) var previous_view_proj: mat4x4; #endif // MOTION_VECTOR_PREPASS // Material bindings will be in @group(1) -#import bevy_pbr::mesh_bindings mesh diff --git a/crates/bevy_pbr/src/render/clustered_forward.wgsl b/crates/bevy_pbr/src/render/clustered_forward.wgsl index e40ee57602235..d52bd6087b35d 100644 --- a/crates/bevy_pbr/src/render/clustered_forward.wgsl +++ b/crates/bevy_pbr/src/render/clustered_forward.wgsl @@ -1,7 +1,9 @@ #define_import_path bevy_pbr::clustered_forward -#import bevy_pbr::mesh_view_bindings as bindings -#import bevy_pbr::utils hsv2rgb +#import bevy_pbr::{ + mesh_view_bindings as bindings, + utils::hsv2rgb, +} // NOTE: Keep in sync with bevy_pbr/src/light.rs fn view_z_to_z_slice(view_z: f32, is_orthographic: bool) -> u32 { diff --git a/crates/bevy_pbr/src/render/fog.wgsl b/crates/bevy_pbr/src/render/fog.wgsl index 6f06aeb3572f3..a9e28ae9e9ecf 100644 --- a/crates/bevy_pbr/src/render/fog.wgsl +++ b/crates/bevy_pbr/src/render/fog.wgsl @@ -1,7 +1,9 @@ #define_import_path bevy_pbr::fog -#import bevy_pbr::mesh_view_bindings fog -#import bevy_pbr::mesh_view_types Fog +#import bevy_pbr::{ + mesh_view_bindings::fog, + mesh_view_types::Fog, +} // Fog formulas adapted from: // https://learn.microsoft.com/en-us/windows/win32/direct3d9/fog-formulas diff --git a/crates/bevy_pbr/src/render/mesh.wgsl b/crates/bevy_pbr/src/render/mesh.wgsl index 641bbeaf91eb1..a29345fbcee7f 100644 --- a/crates/bevy_pbr/src/render/mesh.wgsl +++ b/crates/bevy_pbr/src/render/mesh.wgsl @@ -1,9 +1,10 @@ -#import bevy_pbr::mesh_functions as mesh_functions -#import bevy_pbr::skinning -#import bevy_pbr::morph -#import bevy_pbr::mesh_bindings mesh -#import bevy_pbr::forward_io Vertex, VertexOutput -#import bevy_render::instance_index get_instance_index +#import bevy_pbr::{ + mesh_functions, + skinning, + morph::morph, + forward_io::{Vertex, VertexOutput}, +} +#import bevy_render::instance_index::get_instance_index #ifdef MORPH_TARGETS fn morph_vertex(vertex_in: Vertex) -> Vertex { @@ -14,12 +15,12 @@ fn 
morph_vertex(vertex_in: Vertex) -> Vertex { if weight == 0.0 { continue; } - vertex.position += weight * bevy_pbr::morph::morph(vertex.index, bevy_pbr::morph::position_offset, i); + vertex.position += weight * morph(vertex.index, bevy_pbr::morph::position_offset, i); #ifdef VERTEX_NORMALS - vertex.normal += weight * bevy_pbr::morph::morph(vertex.index, bevy_pbr::morph::normal_offset, i); + vertex.normal += weight * morph(vertex.index, bevy_pbr::morph::normal_offset, i); #endif #ifdef VERTEX_TANGENTS - vertex.tangent += vec4(weight * bevy_pbr::morph::morph(vertex.index, bevy_pbr::morph::tangent_offset, i), 0.0); + vertex.tangent += vec4(weight * morph(vertex.index, bevy_pbr::morph::tangent_offset, i), 0.0); #endif } return vertex; @@ -37,7 +38,7 @@ fn vertex(vertex_no_morph: Vertex) -> VertexOutput { #endif #ifdef SKINNED - var model = bevy_pbr::skinning::skin_model(vertex.joint_indices, vertex.joint_weights); + var model = skinning::skin_model(vertex.joint_indices, vertex.joint_weights); #else // Use vertex_no_morph.instance_index instead of vertex.instance_index to work around a wgpu dx12 bug. // See https://github.com/gfx-rs/naga/issues/2416 . @@ -46,7 +47,7 @@ fn vertex(vertex_no_morph: Vertex) -> VertexOutput { #ifdef VERTEX_NORMALS #ifdef SKINNED - out.world_normal = bevy_pbr::skinning::skin_normals(model, vertex.normal); + out.world_normal = skinning::skin_normals(model, vertex.normal); #else out.world_normal = mesh_functions::mesh_normal_local_to_world( vertex.normal, diff --git a/crates/bevy_pbr/src/render/mesh_bindings.wgsl b/crates/bevy_pbr/src/render/mesh_bindings.wgsl index 1d8cf31221350..81cca44e6c591 100644 --- a/crates/bevy_pbr/src/render/mesh_bindings.wgsl +++ b/crates/bevy_pbr/src/render/mesh_bindings.wgsl @@ -1,6 +1,6 @@ #define_import_path bevy_pbr::mesh_bindings -#import bevy_pbr::mesh_types Mesh +#import bevy_pbr::mesh_types::Mesh #ifdef MESH_BINDGROUP_1 diff --git a/crates/bevy_pbr/src/render/mesh_functions.wgsl b/crates/bevy_pbr/src/render/mesh_functions.wgsl index a0c689d3299da..830536c57ddc6 100644 --- a/crates/bevy_pbr/src/render/mesh_functions.wgsl +++ b/crates/bevy_pbr/src/render/mesh_functions.wgsl @@ -1,10 +1,14 @@ #define_import_path bevy_pbr::mesh_functions -#import bevy_pbr::mesh_view_bindings view -#import bevy_pbr::mesh_bindings mesh -#import bevy_pbr::mesh_types MESH_FLAGS_SIGN_DETERMINANT_MODEL_3X3_BIT -#import bevy_render::instance_index get_instance_index -#import bevy_render::maths affine_to_square, mat2x4_f32_to_mat3x3_unpack +#import bevy_pbr::{ + mesh_view_bindings::view, + mesh_bindings::mesh, + mesh_types::MESH_FLAGS_SIGN_DETERMINANT_MODEL_3X3_BIT, +} +#import bevy_render::{ + instance_index::get_instance_index, + maths::{affine_to_square, mat2x4_f32_to_mat3x3_unpack}, +} fn get_model_matrix(instance_index: u32) -> mat4x4 { return affine_to_square(mesh[get_instance_index(instance_index)].model); diff --git a/crates/bevy_pbr/src/render/mesh_view_bindings.wgsl b/crates/bevy_pbr/src/render/mesh_view_bindings.wgsl index 6d68aa305a091..1e863f4207f5c 100644 --- a/crates/bevy_pbr/src/render/mesh_view_bindings.wgsl +++ b/crates/bevy_pbr/src/render/mesh_view_bindings.wgsl @@ -1,8 +1,10 @@ #define_import_path bevy_pbr::mesh_view_bindings #import bevy_pbr::mesh_view_types as types -#import bevy_render::view View -#import bevy_render::globals Globals +#import bevy_render::{ + view::View, + globals::Globals, +} @group(0) @binding(0) var view: View; @group(0) @binding(1) var lights: types::Lights; diff --git a/crates/bevy_pbr/src/render/mesh_view_types.wgsl 
b/crates/bevy_pbr/src/render/mesh_view_types.wgsl index b944aa2792e2f..f115d49d7898a 100644 --- a/crates/bevy_pbr/src/render/mesh_view_types.wgsl +++ b/crates/bevy_pbr/src/render/mesh_view_types.wgsl @@ -1,8 +1,5 @@ #define_import_path bevy_pbr::mesh_view_types -#import bevy_render::view -#import bevy_render::globals - struct PointLight { // For point lights: the lower-right 2x2 values of the projection matrix [2][2] [2][3] [3][2] [3][3] // For spot lights: the direction (x,z), spot_scale and spot_offset diff --git a/crates/bevy_pbr/src/render/morph.wgsl b/crates/bevy_pbr/src/render/morph.wgsl index 291b3efb5841a..7355f95f33e0c 100644 --- a/crates/bevy_pbr/src/render/morph.wgsl +++ b/crates/bevy_pbr/src/render/morph.wgsl @@ -1,15 +1,8 @@ -// If using this WGSL snippet as an #import, the following should be in scope: -// -// - the `morph_weights` uniform of type `MorphWeights` -// - the `morph_targets` 3d texture -// -// They are defined in `mesh_types.wgsl` and `mesh_bindings.wgsl`. - #define_import_path bevy_pbr::morph #ifdef MORPH_TARGETS -#import bevy_pbr::mesh_types MorphWeights +#import bevy_pbr::mesh_types::MorphWeights; #ifdef MESH_BINDGROUP_1 @@ -61,4 +54,4 @@ fn morph(vertex_index: u32, component_offset: u32, weight_index: u32) -> vec3) -> f32 { // We use `textureSampleLevel` over `textureSample` because the wgpu DX12 diff --git a/crates/bevy_pbr/src/render/pbr.wgsl b/crates/bevy_pbr/src/render/pbr.wgsl index d531cf4060870..def70dd89b3ed 100644 --- a/crates/bevy_pbr/src/render/pbr.wgsl +++ b/crates/bevy_pbr/src/render/pbr.wgsl @@ -1,13 +1,19 @@ -#import bevy_pbr::pbr_functions alpha_discard -#import bevy_pbr::pbr_fragment pbr_input_from_standard_material +#import bevy_pbr::{ + pbr_functions::alpha_discard, + pbr_fragment::pbr_input_from_standard_material, +} #ifdef PREPASS_PIPELINE -#import bevy_pbr::prepass_io VertexOutput, FragmentOutput -#import bevy_pbr::pbr_deferred_functions deferred_output +#import bevy_pbr::{ + prepass_io::{VertexOutput, FragmentOutput}, + pbr_deferred_functions::deferred_output, +} #else -#import bevy_pbr::forward_io VertexOutput, FragmentOutput -#import bevy_pbr::pbr_functions apply_pbr_lighting, main_pass_post_lighting_processing -#import bevy_pbr::pbr_types STANDARD_MATERIAL_FLAGS_UNLIT_BIT +#import bevy_pbr::{ + forward_io::{VertexOutput, FragmentOutput}, + pbr_functions::{apply_pbr_lighting, main_pass_post_lighting_processing}, + pbr_types::STANDARD_MATERIAL_FLAGS_UNLIT_BIT, +} #endif @fragment diff --git a/crates/bevy_pbr/src/render/pbr_ambient.wgsl b/crates/bevy_pbr/src/render/pbr_ambient.wgsl index 28afd5588d3ec..23d5cf29b235a 100644 --- a/crates/bevy_pbr/src/render/pbr_ambient.wgsl +++ b/crates/bevy_pbr/src/render/pbr_ambient.wgsl @@ -1,7 +1,9 @@ #define_import_path bevy_pbr::ambient -#import bevy_pbr::lighting EnvBRDFApprox, F_AB -#import bevy_pbr::mesh_view_bindings lights +#import bevy_pbr::{ + lighting::{EnvBRDFApprox, F_AB}, + mesh_view_bindings::lights, +} // A precomputed `NdotV` is provided because it is computed regardless, // but `world_normal` and the view vector `V` are provided separately for more advanced uses. 
diff --git a/crates/bevy_pbr/src/render/pbr_bindings.wgsl b/crates/bevy_pbr/src/render/pbr_bindings.wgsl index 73a414f0c77d1..fc5cdd280c2b9 100644 --- a/crates/bevy_pbr/src/render/pbr_bindings.wgsl +++ b/crates/bevy_pbr/src/render/pbr_bindings.wgsl @@ -1,6 +1,6 @@ #define_import_path bevy_pbr::pbr_bindings -#import bevy_pbr::pbr_types StandardMaterial +#import bevy_pbr::pbr_types::StandardMaterial @group(1) @binding(0) var material: StandardMaterial; @group(1) @binding(1) var base_color_texture: texture_2d; diff --git a/crates/bevy_pbr/src/render/pbr_fragment.wgsl b/crates/bevy_pbr/src/render/pbr_fragment.wgsl index 390593a6861cc..47d4f6f6d883b 100644 --- a/crates/bevy_pbr/src/render/pbr_fragment.wgsl +++ b/crates/bevy_pbr/src/render/pbr_fragment.wgsl @@ -1,22 +1,24 @@ #define_import_path bevy_pbr::pbr_fragment -#import bevy_pbr::pbr_functions as pbr_functions -#import bevy_pbr::pbr_bindings as pbr_bindings -#import bevy_pbr::pbr_types as pbr_types -#import bevy_pbr::prepass_utils - -#import bevy_pbr::mesh_bindings mesh -#import bevy_pbr::mesh_view_bindings view, screen_space_ambient_occlusion_texture -#import bevy_pbr::parallax_mapping parallaxed_uv +#import bevy_pbr::{ + pbr_functions, + pbr_bindings, + pbr_types, + prepass_utils, + mesh_bindings::mesh, + mesh_view_bindings::view, + parallax_mapping::parallaxed_uv, +} #ifdef SCREEN_SPACE_AMBIENT_OCCLUSION -#import bevy_pbr::gtao_utils gtao_multibounce +#import bevy_pbr::mesh_view_bindings::screen_space_ambient_occlusion_texture +#import bevy_pbr::gtao_utils::gtao_multibounce #endif #ifdef PREPASS_PIPELINE -#import bevy_pbr::prepass_io VertexOutput +#import bevy_pbr::prepass_io::VertexOutput #else -#import bevy_pbr::forward_io VertexOutput +#import bevy_pbr::forward_io::VertexOutput #endif // prepare a basic PbrInput from the vertex stage output, mesh binding and view binding @@ -44,7 +46,7 @@ fn pbr_input_from_vertex_output( ); #ifdef LOAD_PREPASS_NORMALS - pbr_input.N = bevy_pbr::prepass_utils::prepass_normal(in.position, 0u); + pbr_input.N = prepass_utils::prepass_normal(in.position, 0u); #else pbr_input.N = normalize(pbr_input.world_normal); #endif diff --git a/crates/bevy_pbr/src/render/pbr_functions.wgsl b/crates/bevy_pbr/src/render/pbr_functions.wgsl index 72731cc366991..9979f75be0d3a 100644 --- a/crates/bevy_pbr/src/render/pbr_functions.wgsl +++ b/crates/bevy_pbr/src/render/pbr_functions.wgsl @@ -1,24 +1,23 @@ #define_import_path bevy_pbr::pbr_functions -#ifdef TONEMAP_IN_SHADER -#import bevy_core_pipeline::tonemapping -#endif +#import bevy_pbr::{ + pbr_types, + pbr_bindings, + mesh_view_bindings as view_bindings, + mesh_view_types, + lighting, + clustered_forward as clustering, + shadows, + ambient, + mesh_types::MESH_FLAGS_SHADOW_RECEIVER_BIT, +} -#import bevy_pbr::pbr_types as pbr_types -#import bevy_pbr::pbr_bindings as pbr_bindings -#import bevy_pbr::mesh_view_bindings as view_bindings -#import bevy_pbr::mesh_view_types as mesh_view_types -#import bevy_pbr::lighting as lighting -#import bevy_pbr::clustered_forward as clustering -#import bevy_pbr::shadows as shadows -#import bevy_pbr::fog -#import bevy_pbr::ambient as ambient #ifdef ENVIRONMENT_MAP #import bevy_pbr::environment_map #endif -#import bevy_core_pipeline::tonemapping screen_space_dither, powsafe, tone_mapping -#import bevy_pbr::mesh_types MESH_FLAGS_SHADOW_RECEIVER_BIT +#import bevy_core_pipeline::tonemapping::{screen_space_dither, powsafe, tone_mapping} + fn alpha_discard(material: pbr_types::StandardMaterial, output_color: vec4) -> vec4 { var color = 
output_color; @@ -224,7 +223,7 @@ fn apply_pbr_lighting( // Environment map light (indirect) #ifdef ENVIRONMENT_MAP - let environment_light = bevy_pbr::environment_map::environment_map_light(perceptual_roughness, roughness, diffuse_color, NdotV, f_ab, in.N, R, F0); + let environment_light = environment_map::environment_map_light(perceptual_roughness, roughness, diffuse_color, NdotV, f_ab, in.N, R, F0); indirect_light += (environment_light.diffuse * occlusion) + environment_light.specular; #endif diff --git a/crates/bevy_pbr/src/render/pbr_lighting.wgsl b/crates/bevy_pbr/src/render/pbr_lighting.wgsl index 6658c43034060..bc279ca594ad3 100644 --- a/crates/bevy_pbr/src/render/pbr_lighting.wgsl +++ b/crates/bevy_pbr/src/render/pbr_lighting.wgsl @@ -1,8 +1,10 @@ #define_import_path bevy_pbr::lighting -#import bevy_pbr::utils PI -#import bevy_pbr::mesh_view_types as view_types -#import bevy_pbr::mesh_view_bindings as view_bindings +#import bevy_pbr::{ + utils::PI, + mesh_view_types::POINT_LIGHT_FLAGS_SPOT_LIGHT_Y_NEGATIVE, + mesh_view_bindings as view_bindings, +} // From the Filament design doc // https://google.github.io/filament/Filament.html#table_symbols @@ -253,7 +255,7 @@ fn spot_light( // reconstruct spot dir from x/z and y-direction flag var spot_dir = vec3((*light).light_custom_data.x, 0.0, (*light).light_custom_data.y); spot_dir.y = sqrt(max(0.0, 1.0 - spot_dir.x * spot_dir.x - spot_dir.z * spot_dir.z)); - if ((*light).flags & view_types::POINT_LIGHT_FLAGS_SPOT_LIGHT_Y_NEGATIVE) != 0u { + if ((*light).flags & POINT_LIGHT_FLAGS_SPOT_LIGHT_Y_NEGATIVE) != 0u { spot_dir.y = -spot_dir.y; } let light_to_frag = (*light).position_radius.xyz - world_position.xyz; diff --git a/crates/bevy_pbr/src/render/pbr_prepass.wgsl b/crates/bevy_pbr/src/render/pbr_prepass.wgsl index 479d64ce236c3..5af33bfc3e469 100644 --- a/crates/bevy_pbr/src/render/pbr_prepass.wgsl +++ b/crates/bevy_pbr/src/render/pbr_prepass.wgsl @@ -1,12 +1,11 @@ -#import bevy_pbr::pbr_prepass_functions -#import bevy_pbr::pbr_bindings -#import bevy_pbr::pbr_types -#ifdef NORMAL_PREPASS -#import bevy_pbr::pbr_functions -#endif // NORMAL_PREPASS - -#import bevy_pbr::prepass_io as prepass_io -#import bevy_pbr::mesh_view_bindings view +#import bevy_pbr::{ + pbr_prepass_functions, + pbr_bindings::material, + pbr_types, + pbr_functions, + prepass_io, + mesh_view_bindings::view, +} #ifdef PREPASS_FRAGMENT @fragment @@ -14,7 +13,7 @@ fn fragment( in: prepass_io::VertexOutput, @builtin(front_facing) is_front: bool, ) -> prepass_io::FragmentOutput { - bevy_pbr::pbr_prepass_functions::prepass_alpha_discard(in); + pbr_prepass_functions::prepass_alpha_discard(in); var out: prepass_io::FragmentOutput; @@ -24,15 +23,15 @@ fn fragment( #ifdef NORMAL_PREPASS // NOTE: Unlit bit not set means == 0 is true, so the true case is if lit - if (bevy_pbr::pbr_bindings::material.flags & bevy_pbr::pbr_types::STANDARD_MATERIAL_FLAGS_UNLIT_BIT) == 0u { - let world_normal = bevy_pbr::pbr_functions::prepare_world_normal( + if (material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_UNLIT_BIT) == 0u { + let world_normal = pbr_functions::prepare_world_normal( in.world_normal, - (bevy_pbr::pbr_bindings::material.flags & bevy_pbr::pbr_types::STANDARD_MATERIAL_FLAGS_DOUBLE_SIDED_BIT) != 0u, + (material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_DOUBLE_SIDED_BIT) != 0u, is_front, ); - let normal = bevy_pbr::pbr_functions::apply_normal_mapping( - bevy_pbr::pbr_bindings::material.flags, + let normal = pbr_functions::apply_normal_mapping( + material.flags, world_normal, #ifdef 
VERTEX_TANGENTS #ifdef STANDARDMATERIAL_NORMAL_MAP @@ -52,7 +51,7 @@ fn fragment( #endif // NORMAL_PREPASS #ifdef MOTION_VECTOR_PREPASS - out.motion_vector = bevy_pbr::pbr_prepass_functions::calculate_motion_vector(in.world_position, in.previous_world_position); + out.motion_vector = pbr_prepass_functions::calculate_motion_vector(in.world_position, in.previous_world_position); #endif return out; @@ -60,6 +59,6 @@ fn fragment( #else @fragment fn fragment(in: prepass_io::VertexOutput) { - bevy_pbr::pbr_prepass_functions::prepass_alpha_discard(in); + pbr_prepass_functions::prepass_alpha_discard(in); } #endif // PREPASS_FRAGMENT diff --git a/crates/bevy_pbr/src/render/pbr_prepass_functions.wgsl b/crates/bevy_pbr/src/render/pbr_prepass_functions.wgsl index 50afd8380c2a4..176c56aa1aba7 100644 --- a/crates/bevy_pbr/src/render/pbr_prepass_functions.wgsl +++ b/crates/bevy_pbr/src/render/pbr_prepass_functions.wgsl @@ -1,11 +1,12 @@ #define_import_path bevy_pbr::pbr_prepass_functions -#import bevy_pbr::prepass_io VertexOutput -#import bevy_pbr::prepass_bindings previous_view_proj -#import bevy_pbr::mesh_view_bindings view - -#import bevy_pbr::pbr_bindings as pbr_bindings -#import bevy_pbr::pbr_types as pbr_types +#import bevy_pbr::{ + prepass_io::VertexOutput, + prepass_bindings::previous_view_proj, + mesh_view_bindings::view, + pbr_bindings, + pbr_types, +} // Cutoff used for the premultiplied alpha modes BLEND and ADD. const PREMULTIPLIED_ALPHA_CUTOFF = 0.05; diff --git a/crates/bevy_pbr/src/render/shadow_sampling.wgsl b/crates/bevy_pbr/src/render/shadow_sampling.wgsl index 04d8920307a6c..0a93d5468b06b 100644 --- a/crates/bevy_pbr/src/render/shadow_sampling.wgsl +++ b/crates/bevy_pbr/src/render/shadow_sampling.wgsl @@ -1,7 +1,9 @@ #define_import_path bevy_pbr::shadow_sampling -#import bevy_pbr::mesh_view_bindings as view_bindings -#import bevy_pbr::utils PI +#import bevy_pbr::{ + mesh_view_bindings as view_bindings, + utils::PI, +} // Do the lookup, using HW 2x2 PCF and comparison fn sample_shadow_map_hardware(light_local: vec2, depth: f32, array_index: i32) -> f32 { diff --git a/crates/bevy_pbr/src/render/shadows.wgsl b/crates/bevy_pbr/src/render/shadows.wgsl index 9ace738252ea6..4ccbd5f850142 100644 --- a/crates/bevy_pbr/src/render/shadows.wgsl +++ b/crates/bevy_pbr/src/render/shadows.wgsl @@ -1,9 +1,11 @@ #define_import_path bevy_pbr::shadows -#import bevy_pbr::mesh_view_types POINT_LIGHT_FLAGS_SPOT_LIGHT_Y_NEGATIVE -#import bevy_pbr::mesh_view_bindings as view_bindings -#import bevy_pbr::utils hsv2rgb -#import bevy_pbr::shadow_sampling sample_shadow_map +#import bevy_pbr::{ + mesh_view_types::POINT_LIGHT_FLAGS_SPOT_LIGHT_Y_NEGATIVE, + mesh_view_bindings as view_bindings, + utils::hsv2rgb, + shadow_sampling::sample_shadow_map +} const flip_z: vec3 = vec3(1.0, 1.0, -1.0); diff --git a/crates/bevy_pbr/src/render/skinning.wgsl b/crates/bevy_pbr/src/render/skinning.wgsl index 24678619a34c1..3f23629d1d4df 100644 --- a/crates/bevy_pbr/src/render/skinning.wgsl +++ b/crates/bevy_pbr/src/render/skinning.wgsl @@ -1,6 +1,6 @@ #define_import_path bevy_pbr::skinning -#import bevy_pbr::mesh_types SkinnedMesh +#import bevy_pbr::mesh_types::SkinnedMesh #ifdef SKINNED diff --git a/crates/bevy_pbr/src/render/wireframe.wgsl b/crates/bevy_pbr/src/render/wireframe.wgsl index b15b25175e565..ed5c24b4b21f3 100644 --- a/crates/bevy_pbr/src/render/wireframe.wgsl +++ b/crates/bevy_pbr/src/render/wireframe.wgsl @@ -1,4 +1,5 @@ -#import bevy_pbr::forward_io VertexOutput +#import bevy_pbr::forward_io::VertexOutput + struct 
WireframeMaterial { color: vec4, }; diff --git a/crates/bevy_pbr/src/ssao/gtao.wgsl b/crates/bevy_pbr/src/ssao/gtao.wgsl index 67f140a98866f..075612fd508f5 100644 --- a/crates/bevy_pbr/src/ssao/gtao.wgsl +++ b/crates/bevy_pbr/src/ssao/gtao.wgsl @@ -5,10 +5,14 @@ // Source code heavily based on XeGTAO v1.30 from Intel // https://github.com/GameTechDev/XeGTAO/blob/0d177ce06bfa642f64d8af4de1197ad1bcb862d4/Source/Rendering/Shaders/XeGTAO.hlsli -#import bevy_pbr::gtao_utils fast_acos -#import bevy_pbr::utils PI, HALF_PI -#import bevy_render::view View -#import bevy_render::globals Globals +#import bevy_pbr::{ + gtao_utils::fast_acos, + utils::{PI, HALF_PI}, +} +#import bevy_render::{ + view::View, + globals::Globals, +} @group(0) @binding(0) var preprocessed_depth: texture_2d; @group(0) @binding(1) var normals: texture_2d; diff --git a/crates/bevy_pbr/src/ssao/gtao_utils.wgsl b/crates/bevy_pbr/src/ssao/gtao_utils.wgsl index 11233ba05226b..f081393edb395 100644 --- a/crates/bevy_pbr/src/ssao/gtao_utils.wgsl +++ b/crates/bevy_pbr/src/ssao/gtao_utils.wgsl @@ -1,6 +1,6 @@ #define_import_path bevy_pbr::gtao_utils -#import bevy_pbr::utils PI, HALF_PI +#import bevy_pbr::utils::{PI, HALF_PI} // Approximates single-bounce ambient occlusion to multi-bounce ambient occlusion // https://blog.selfshadow.com/publications/s2016-shading-course/activision/s2016_pbs_activision_occlusion.pdf#page=78 diff --git a/crates/bevy_pbr/src/ssao/preprocess_depth.wgsl b/crates/bevy_pbr/src/ssao/preprocess_depth.wgsl index d977148609d53..73dccaa02c09a 100644 --- a/crates/bevy_pbr/src/ssao/preprocess_depth.wgsl +++ b/crates/bevy_pbr/src/ssao/preprocess_depth.wgsl @@ -5,7 +5,7 @@ // Reference: https://research.nvidia.com/sites/default/files/pubs/2012-06_Scalable-Ambient-Obscurance/McGuire12SAO.pdf, section 2.2 -#import bevy_render::view View +#import bevy_render::view::View @group(0) @binding(0) var input_depth: texture_depth_2d; @group(0) @binding(1) var preprocessed_depth_mip0: texture_storage_2d; diff --git a/crates/bevy_pbr/src/ssao/spatial_denoise.wgsl b/crates/bevy_pbr/src/ssao/spatial_denoise.wgsl index 4cfe4cd350871..2448db309fce7 100644 --- a/crates/bevy_pbr/src/ssao/spatial_denoise.wgsl +++ b/crates/bevy_pbr/src/ssao/spatial_denoise.wgsl @@ -9,7 +9,7 @@ // XeGTAO does a 3x3 filter, on two pixels at a time per compute thread, applied twice // We do a 3x3 filter, on 1 pixel per compute thread, applied once -#import bevy_render::view View +#import bevy_render::view::View @group(0) @binding(0) var ambient_occlusion_noisy: texture_2d; @group(0) @binding(1) var depth_differences: texture_2d; diff --git a/crates/bevy_render/Cargo.toml b/crates/bevy_render/Cargo.toml index 28622d0f8ada5..9878284469930 100644 --- a/crates/bevy_render/Cargo.toml +++ b/crates/bevy_render/Cargo.toml @@ -62,7 +62,7 @@ codespan-reporting = "0.11.0" # It is enabled for now to avoid having to do a significant overhaul of the renderer just for wasm wgpu = { version = "0.17.1", features = ["naga", "fragile-send-sync-non-atomic-wasm"] } naga = { version = "0.13.0", features = ["wgsl-in"] } -naga_oil = "0.9" +naga_oil = "0.10" serde = { version = "1", features = ["derive"] } bitflags = "2.3" bytemuck = { version = "1.5", features = ["derive"] } diff --git a/crates/bevy_sprite/src/mesh2d/color_material.wgsl b/crates/bevy_sprite/src/mesh2d/color_material.wgsl index 6f125a83b18ba..1ed5d75341b0c 100644 --- a/crates/bevy_sprite/src/mesh2d/color_material.wgsl +++ b/crates/bevy_sprite/src/mesh2d/color_material.wgsl @@ -1,6 +1,7 @@ -#import 
bevy_sprite::mesh2d_types Mesh2d -#import bevy_sprite::mesh2d_vertex_output VertexOutput -#import bevy_sprite::mesh2d_view_bindings view +#import bevy_sprite::{ + mesh2d_vertex_output::VertexOutput, + mesh2d_view_bindings::view, +} #ifdef TONEMAP_IN_SHADER #import bevy_core_pipeline::tonemapping @@ -29,7 +30,7 @@ fn fragment( output_color = output_color * textureSample(texture, texture_sampler, mesh.uv); } #ifdef TONEMAP_IN_SHADER - output_color = bevy_core_pipeline::tonemapping::tone_mapping(output_color, view.color_grading); + output_color = tonemapping::tone_mapping(output_color, view.color_grading); #endif return output_color; } diff --git a/crates/bevy_sprite/src/mesh2d/mesh2d.wgsl b/crates/bevy_sprite/src/mesh2d/mesh2d.wgsl index a3a8d118c199d..00c1ec8442ad2 100644 --- a/crates/bevy_sprite/src/mesh2d/mesh2d.wgsl +++ b/crates/bevy_sprite/src/mesh2d/mesh2d.wgsl @@ -1,7 +1,8 @@ -#import bevy_sprite::mesh2d_functions as mesh_functions -#import bevy_sprite::mesh2d_bindings mesh -#import bevy_sprite::mesh2d_vertex_output VertexOutput -#import bevy_sprite::mesh2d_view_bindings view +#import bevy_sprite::{ + mesh2d_functions as mesh_functions, + mesh2d_vertex_output::VertexOutput, + mesh2d_view_bindings::view, +} #ifdef TONEMAP_IN_SHADER #import bevy_core_pipeline::tonemapping @@ -66,7 +67,7 @@ fn fragment( #ifdef VERTEX_COLORS var color = in.color; #ifdef TONEMAP_IN_SHADER - color = bevy_core_pipeline::tonemapping::tone_mapping(color, view.color_grading); + color = tonemapping::tone_mapping(color, view.color_grading); #endif return color; #else diff --git a/crates/bevy_sprite/src/mesh2d/mesh2d_bindings.wgsl b/crates/bevy_sprite/src/mesh2d/mesh2d_bindings.wgsl index e673ef23f06b6..3c3ec0906efcc 100644 --- a/crates/bevy_sprite/src/mesh2d/mesh2d_bindings.wgsl +++ b/crates/bevy_sprite/src/mesh2d/mesh2d_bindings.wgsl @@ -1,6 +1,6 @@ #define_import_path bevy_sprite::mesh2d_bindings -#import bevy_sprite::mesh2d_types Mesh2d +#import bevy_sprite::mesh2d_types::Mesh2d #ifdef MESH_BINDGROUP_1 diff --git a/crates/bevy_sprite/src/mesh2d/mesh2d_functions.wgsl b/crates/bevy_sprite/src/mesh2d/mesh2d_functions.wgsl index b936cad10f66f..b2bc92a00b73c 100644 --- a/crates/bevy_sprite/src/mesh2d/mesh2d_functions.wgsl +++ b/crates/bevy_sprite/src/mesh2d/mesh2d_functions.wgsl @@ -1,9 +1,13 @@ #define_import_path bevy_sprite::mesh2d_functions -#import bevy_sprite::mesh2d_view_bindings view -#import bevy_sprite::mesh2d_bindings mesh -#import bevy_render::instance_index get_instance_index -#import bevy_render::maths affine_to_square, mat2x4_f32_to_mat3x3_unpack +#import bevy_sprite::{ + mesh2d_view_bindings::view, + mesh2d_bindings::mesh, +} +#import bevy_render::{ + instance_index::get_instance_index, + maths::{affine_to_square, mat2x4_f32_to_mat3x3_unpack}, +} fn get_model_matrix(instance_index: u32) -> mat4x4 { return affine_to_square(mesh[get_instance_index(instance_index)].model); diff --git a/crates/bevy_sprite/src/mesh2d/mesh2d_view_bindings.wgsl b/crates/bevy_sprite/src/mesh2d/mesh2d_view_bindings.wgsl index 55eb7b964c23a..8b2f57d6eaf94 100644 --- a/crates/bevy_sprite/src/mesh2d/mesh2d_view_bindings.wgsl +++ b/crates/bevy_sprite/src/mesh2d/mesh2d_view_bindings.wgsl @@ -1,7 +1,7 @@ #define_import_path bevy_sprite::mesh2d_view_bindings -#import bevy_render::view View -#import bevy_render::globals Globals +#import bevy_render::view::View +#import bevy_render::globals::Globals @group(0) @binding(0) var view: View; diff --git a/crates/bevy_sprite/src/render/sprite.wgsl b/crates/bevy_sprite/src/render/sprite.wgsl 
index 536971e5cb6e6..1f5c0125e17ae 100644 --- a/crates/bevy_sprite/src/render/sprite.wgsl +++ b/crates/bevy_sprite/src/render/sprite.wgsl @@ -2,8 +2,10 @@ #import bevy_core_pipeline::tonemapping #endif -#import bevy_render::maths affine_to_square -#import bevy_render::view View +#import bevy_render::{ + maths::affine_to_square, + view::View, +} @group(0) @binding(0) var view: View; @@ -54,7 +56,7 @@ fn fragment(in: VertexOutput) -> @location(0) vec4 { var color = in.color * textureSample(sprite_texture, sprite_sampler, in.uv); #ifdef TONEMAP_IN_SHADER - color = bevy_core_pipeline::tonemapping::tone_mapping(color, view.color_grading); + color = tonemapping::tone_mapping(color, view.color_grading); #endif return color; diff --git a/crates/bevy_ui/src/render/ui.wgsl b/crates/bevy_ui/src/render/ui.wgsl index 5014c4da4baec..aeb57aad81358 100644 --- a/crates/bevy_ui/src/render/ui.wgsl +++ b/crates/bevy_ui/src/render/ui.wgsl @@ -1,4 +1,4 @@ -#import bevy_render::view View +#import bevy_render::view::View const TEXTURED_QUAD: u32 = 0u; diff --git a/examples/2d/mesh2d_manual.rs b/examples/2d/mesh2d_manual.rs index 900b7645efd41..0b8856c4132de 100644 --- a/examples/2d/mesh2d_manual.rs +++ b/examples/2d/mesh2d_manual.rs @@ -219,8 +219,7 @@ type DrawColoredMesh2d = ( // using `include_str!()`, or loaded like any other asset with `asset_server.load()`. const COLORED_MESH2D_SHADER: &str = r" // Import the standard 2d mesh uniforms and set their bind groups -#import bevy_sprite::mesh2d_bindings mesh -#import bevy_sprite::mesh2d_functions as MeshFunctions +#import bevy_sprite::mesh2d_functions // The structure of the vertex buffer is as specified in `specialize()` struct Vertex { @@ -241,8 +240,8 @@ struct VertexOutput { fn vertex(vertex: Vertex) -> VertexOutput { var out: VertexOutput; // Project the world position of the mesh into screen position - let model = MeshFunctions::get_model_matrix(vertex.instance_index); - out.clip_position = MeshFunctions::mesh2d_position_local_to_clip(model, vec4(vertex.position, 1.0)); + let model = mesh2d_functions::get_model_matrix(vertex.instance_index); + out.clip_position = mesh2d_functions::mesh2d_position_local_to_clip(model, vec4(vertex.position, 1.0)); // Unpack the `u32` from the vertex buffer into the `vec4` used by the fragment shader out.color = vec4((vec4(vertex.color) >> vec4(0u, 8u, 16u, 24u)) & vec4(255u)) / 255.0; return out; From 6f2a5cb862d747c9068bf83996c6c1a9f9d3b901 Mon Sep 17 00:00:00 2001 From: robtfm <50659922+robtfm@users.noreply.github.com> Date: Sat, 21 Oct 2023 16:39:22 +0100 Subject: [PATCH 46/63] Bind group entries (#9694) # Objective Simplify bind group creation code. alternative to (and based on) #9476 ## Solution - Add a `BindGroupEntries` struct that can transparently be used where `&[BindGroupEntry<'b>]` is required in BindGroupDescriptors. 
Allows constructing the descriptor's entries as:

```rust
render_device.create_bind_group(
    "my_bind_group",
    &my_layout,
    &BindGroupEntries::with_indices((
        (2, &my_sampler),
        (3, my_uniform),
    )),
);
```

instead of

```rust
render_device.create_bind_group(
    "my_bind_group",
    &my_layout,
    &[
        BindGroupEntry {
            binding: 2,
            resource: BindingResource::Sampler(&my_sampler),
        },
        BindGroupEntry {
            binding: 3,
            resource: my_uniform,
        },
    ],
);
```

or

```rust
render_device.create_bind_group(
    "my_bind_group",
    &my_layout,
    &BindGroupEntries::sequential((&my_sampler, my_uniform)),
);
```

instead of

```rust
render_device.create_bind_group(
    "my_bind_group",
    &my_layout,
    &[
        BindGroupEntry {
            binding: 0,
            resource: BindingResource::Sampler(&my_sampler),
        },
        BindGroupEntry {
            binding: 1,
            resource: my_uniform,
        },
    ],
);
```

  The structs have no user-facing macros, are tuple-type-based (so stack-allocated), and have no noticeable impact on compile time.
- Also adds a `DynamicBindGroupEntries` struct with a similar API that uses a `Vec` under the hood and allows extending the entries (see the usage sketch after the migration guide below).
- Modifies `RenderDevice::create_bind_group` to take separate `label`, `layout` and `entries` arguments instead of a `BindGroupDescriptor` struct. The struct can't be stored due to the internal references, and with only 3 members arguably does not add enough context to justify itself.
- Modify the codebase to use the new API and the `BindGroupEntries` / `DynamicBindGroupEntries` structs where appropriate (whenever the entries slice contains more than 1 member).

## Migration Guide

- Calls to `RenderDevice::create_bind_group(&BindGroupDescriptor { label, layout, entries })` must be amended to `RenderDevice::create_bind_group(label, layout, entries)`.
- If `label`s have been specified as `"bind_group_name".into()`, they need to change to just `"bind_group_name"`. `Some("bind_group_name")` and `None` will still work, but `Some("bind_group_name")` can optionally be simplified to just `"bind_group_name"`.
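As a usage sketch for the `DynamicBindGroupEntries` mentioned in the solution above (the `my_*` names here are placeholders, not code from this PR): `sequential` assigns bindings starting at 0, and `extend_sequential` continues from the last assigned index.

```rust
use bevy_render::render_resource::DynamicBindGroupEntries;

// Bindings 0 and 1 are assigned by `sequential`...
let entries = DynamicBindGroupEntries::sequential((&my_sampler, my_uniform))
    // ...and `extend_sequential` continues at binding 2.
    .extend_sequential((&my_texture_view,));

let bind_group = render_device.create_bind_group(
    "my_dynamic_bind_group",
    &my_layout,
    &entries,
);
```

`new_with_indices` / `extend_with_indices` work the same way with explicit `(index, resource)` pairs, which is what `prepare_mesh_view_bind_groups` uses in the diff below.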
--------- Co-authored-by: IceSentry --- crates/bevy_core_pipeline/src/bloom/mod.rs | 88 ++---- .../src/contrast_adaptive_sharpening/node.rs | 36 +-- .../src/deferred/copy_lighting_id.rs | 22 +- crates/bevy_core_pipeline/src/fxaa/node.rs | 27 +- .../bevy_core_pipeline/src/msaa_writeback.rs | 23 +- crates/bevy_core_pipeline/src/skybox/mod.rs | 41 +-- crates/bevy_core_pipeline/src/taa/mod.rs | 65 ++-- .../bevy_core_pipeline/src/tonemapping/mod.rs | 14 +- .../src/tonemapping/node.rs | 48 +-- .../bevy_core_pipeline/src/upscaling/node.rs | 27 +- crates/bevy_gizmos/src/lib.rs | 15 +- crates/bevy_pbr/src/deferred/mod.rs | 15 +- crates/bevy_pbr/src/environment_map/mod.rs | 22 +- crates/bevy_pbr/src/prepass/mod.rs | 50 +--- .../bevy_pbr/src/prepass/prepass_bindings.rs | 57 +--- crates/bevy_pbr/src/render/mesh_bindings.rs | 43 ++- .../bevy_pbr/src/render/mesh_view_bindings.rs | 140 +++------ crates/bevy_pbr/src/ssao/mod.rs | 223 ++++---------- .../src/render_resource/bind_group.rs | 11 +- .../src/render_resource/bind_group_entries.rs | 282 ++++++++++++++++++ crates/bevy_render/src/render_resource/mod.rs | 2 + .../src/render_resource/uniform_buffer.rs | 19 ++ .../bevy_render/src/renderer/render_device.rs | 17 +- crates/bevy_render/src/view/window/mod.rs | 17 +- crates/bevy_sprite/src/mesh2d/mesh.rs | 32 +- crates/bevy_sprite/src/render/mod.rs | 39 +-- crates/bevy_ui/src/render/mod.rs | 41 +-- crates/bevy_utils/macros/src/lib.rs | 35 +++ .../shader/compute_shader_game_of_life.rs | 13 +- examples/shader/post_processing.rs | 49 ++- examples/shader/texture_binding_array.rs | 24 +- 31 files changed, 704 insertions(+), 833 deletions(-) create mode 100644 crates/bevy_render/src/render_resource/bind_group_entries.rs diff --git a/crates/bevy_core_pipeline/src/bloom/mod.rs b/crates/bevy_core_pipeline/src/bloom/mod.rs index 9f8d0aec2946f..c1299794df570 100644 --- a/crates/bevy_core_pipeline/src/bloom/mod.rs +++ b/crates/bevy_core_pipeline/src/bloom/mod.rs @@ -170,30 +170,16 @@ impl ViewNode for BloomNode { // First downsample pass { - let downsampling_first_bind_group = - render_context - .render_device() - .create_bind_group(&BindGroupDescriptor { - label: Some("bloom_downsampling_first_bind_group"), - layout: &downsampling_pipeline_res.bind_group_layout, - entries: &[ - BindGroupEntry { - binding: 0, - // Read from main texture directly - resource: BindingResource::TextureView( - view_target.main_texture_view(), - ), - }, - BindGroupEntry { - binding: 1, - resource: BindingResource::Sampler(&bind_groups.sampler), - }, - BindGroupEntry { - binding: 2, - resource: uniforms.clone(), - }, - ], - }); + let downsampling_first_bind_group = render_context.render_device().create_bind_group( + "bloom_downsampling_first_bind_group", + &downsampling_pipeline_res.bind_group_layout, + &BindGroupEntries::sequential(( + // Read from main texture directly + view_target.main_texture_view(), + &bind_groups.sampler, + uniforms.clone(), + )), + ); let view = &bloom_texture.view(0); let mut downsampling_first_pass = @@ -416,46 +402,28 @@ fn prepare_bloom_bind_groups( let mut downsampling_bind_groups = Vec::with_capacity(bind_group_count); for mip in 1..bloom_texture.mip_count { - downsampling_bind_groups.push(render_device.create_bind_group(&BindGroupDescriptor { - label: Some("bloom_downsampling_bind_group"), - layout: &downsampling_pipeline.bind_group_layout, - entries: &[ - BindGroupEntry { - binding: 0, - resource: BindingResource::TextureView(&bloom_texture.view(mip - 1)), - }, - BindGroupEntry { - binding: 1, - resource: 
BindingResource::Sampler(sampler), - }, - BindGroupEntry { - binding: 2, - resource: uniforms.binding().unwrap(), - }, - ], - })); + downsampling_bind_groups.push(render_device.create_bind_group( + "bloom_downsampling_bind_group", + &downsampling_pipeline.bind_group_layout, + &BindGroupEntries::sequential(( + &bloom_texture.view(mip - 1), + sampler, + uniforms.binding().unwrap(), + )), + )); } let mut upsampling_bind_groups = Vec::with_capacity(bind_group_count); for mip in (0..bloom_texture.mip_count).rev() { - upsampling_bind_groups.push(render_device.create_bind_group(&BindGroupDescriptor { - label: Some("bloom_upsampling_bind_group"), - layout: &upsampling_pipeline.bind_group_layout, - entries: &[ - BindGroupEntry { - binding: 0, - resource: BindingResource::TextureView(&bloom_texture.view(mip)), - }, - BindGroupEntry { - binding: 1, - resource: BindingResource::Sampler(sampler), - }, - BindGroupEntry { - binding: 2, - resource: uniforms.binding().unwrap(), - }, - ], - })); + upsampling_bind_groups.push(render_device.create_bind_group( + "bloom_upsampling_bind_group", + &upsampling_pipeline.bind_group_layout, + &BindGroupEntries::sequential(( + &bloom_texture.view(mip), + sampler, + uniforms.binding().unwrap(), + )), + )); } commands.entity(entity).insert(BloomBindGroups { diff --git a/crates/bevy_core_pipeline/src/contrast_adaptive_sharpening/node.rs b/crates/bevy_core_pipeline/src/contrast_adaptive_sharpening/node.rs index 0dd3f086ee27c..5bb8b87ebc58b 100644 --- a/crates/bevy_core_pipeline/src/contrast_adaptive_sharpening/node.rs +++ b/crates/bevy_core_pipeline/src/contrast_adaptive_sharpening/node.rs @@ -7,8 +7,8 @@ use bevy_render::{ extract_component::{ComponentUniforms, DynamicUniformIndex}, render_graph::{Node, NodeRunError, RenderGraphContext}, render_resource::{ - BindGroup, BindGroupDescriptor, BindGroupEntry, BindingResource, BufferId, Operations, - PipelineCache, RenderPassColorAttachment, RenderPassDescriptor, TextureViewId, + BindGroup, BindGroupEntries, BufferId, Operations, PipelineCache, + RenderPassColorAttachment, RenderPassDescriptor, TextureViewId, }, renderer::RenderContext, view::{ExtractedView, ViewTarget}, @@ -77,29 +77,15 @@ impl Node for CASNode { bind_group } cached_bind_group => { - let bind_group = - render_context - .render_device() - .create_bind_group(&BindGroupDescriptor { - label: Some("cas_bind_group"), - layout: &sharpening_pipeline.texture_bind_group, - entries: &[ - BindGroupEntry { - binding: 0, - resource: BindingResource::TextureView(view_target.source), - }, - BindGroupEntry { - binding: 1, - resource: BindingResource::Sampler( - &sharpening_pipeline.sampler, - ), - }, - BindGroupEntry { - binding: 2, - resource: uniforms, - }, - ], - }); + let bind_group = render_context.render_device().create_bind_group( + "cas_bind_group", + &sharpening_pipeline.texture_bind_group, + &BindGroupEntries::sequential(( + view_target.source, + &sharpening_pipeline.sampler, + uniforms, + )), + ); let (_, _, bind_group) = cached_bind_group.insert((uniforms_id, source.id(), bind_group)); diff --git a/crates/bevy_core_pipeline/src/deferred/copy_lighting_id.rs b/crates/bevy_core_pipeline/src/deferred/copy_lighting_id.rs index 5609896045ec8..c60306286900a 100644 --- a/crates/bevy_core_pipeline/src/deferred/copy_lighting_id.rs +++ b/crates/bevy_core_pipeline/src/deferred/copy_lighting_id.rs @@ -18,10 +18,7 @@ use bevy_render::{ use bevy_ecs::query::QueryItem; use bevy_render::{ render_graph::{NodeRunError, RenderGraphContext, ViewNode}, - render_resource::{ - 
BindGroupDescriptor, BindGroupEntry, BindingResource, Operations, PipelineCache, - RenderPassDescriptor, - }, + render_resource::{Operations, PipelineCache, RenderPassDescriptor}, renderer::RenderContext, }; @@ -94,18 +91,11 @@ impl ViewNode for CopyDeferredLightingIdNode { return Ok(()); }; - let bind_group = render_context - .render_device() - .create_bind_group(&BindGroupDescriptor { - label: Some("copy_deferred_lighting_id_bind_group"), - layout: ©_deferred_lighting_id_pipeline.layout, - entries: &[BindGroupEntry { - binding: 0, - resource: BindingResource::TextureView( - &deferred_lighting_pass_id_texture.default_view, - ), - }], - }); + let bind_group = render_context.render_device().create_bind_group( + "copy_deferred_lighting_id_bind_group", + ©_deferred_lighting_id_pipeline.layout, + &BindGroupEntries::single(&deferred_lighting_pass_id_texture.default_view), + ); let mut render_pass = render_context.begin_tracked_render_pass(RenderPassDescriptor { label: Some("copy_deferred_lighting_id_pass"), diff --git a/crates/bevy_core_pipeline/src/fxaa/node.rs b/crates/bevy_core_pipeline/src/fxaa/node.rs index 2daaf0584d981..7eaf4dce268ad 100644 --- a/crates/bevy_core_pipeline/src/fxaa/node.rs +++ b/crates/bevy_core_pipeline/src/fxaa/node.rs @@ -6,9 +6,8 @@ use bevy_ecs::query::QueryItem; use bevy_render::{ render_graph::{NodeRunError, RenderGraphContext, ViewNode}, render_resource::{ - BindGroup, BindGroupDescriptor, BindGroupEntry, BindingResource, FilterMode, Operations, - PipelineCache, RenderPassColorAttachment, RenderPassDescriptor, SamplerDescriptor, - TextureViewId, + BindGroup, BindGroupEntries, FilterMode, Operations, PipelineCache, + RenderPassColorAttachment, RenderPassDescriptor, SamplerDescriptor, TextureViewId, }, renderer::RenderContext, view::ViewTarget, @@ -61,23 +60,11 @@ impl ViewNode for FxaaNode { ..default() }); - let bind_group = - render_context - .render_device() - .create_bind_group(&BindGroupDescriptor { - label: None, - layout: &fxaa_pipeline.texture_bind_group, - entries: &[ - BindGroupEntry { - binding: 0, - resource: BindingResource::TextureView(source), - }, - BindGroupEntry { - binding: 1, - resource: BindingResource::Sampler(&sampler), - }, - ], - }); + let bind_group = render_context.render_device().create_bind_group( + None, + &fxaa_pipeline.texture_bind_group, + &BindGroupEntries::sequential((source, &sampler)), + ); let (_, bind_group) = cached_bind_group.insert((source.id(), bind_group)); bind_group diff --git a/crates/bevy_core_pipeline/src/msaa_writeback.rs b/crates/bevy_core_pipeline/src/msaa_writeback.rs index 0646d4ce67ffb..d80bc0fce7bc9 100644 --- a/crates/bevy_core_pipeline/src/msaa_writeback.rs +++ b/crates/bevy_core_pipeline/src/msaa_writeback.rs @@ -8,6 +8,7 @@ use bevy_ecs::prelude::*; use bevy_render::{ camera::ExtractedCamera, render_graph::{Node, NodeRunError, RenderGraphApp, RenderGraphContext}, + render_resource::BindGroupEntries, renderer::RenderContext, view::{Msaa, ViewTarget}, Render, RenderSet, @@ -90,23 +91,11 @@ impl Node for MsaaWritebackNode { depth_stencil_attachment: None, }; - let bind_group = - render_context - .render_device() - .create_bind_group(&BindGroupDescriptor { - label: None, - layout: &blit_pipeline.texture_bind_group, - entries: &[ - BindGroupEntry { - binding: 0, - resource: BindingResource::TextureView(post_process.source), - }, - BindGroupEntry { - binding: 1, - resource: BindingResource::Sampler(&blit_pipeline.sampler), - }, - ], - }); + let bind_group = render_context.render_device().create_bind_group( + 
None, + &blit_pipeline.texture_bind_group, + &BindGroupEntries::sequential((post_process.source, &blit_pipeline.sampler)), + ); let mut render_pass = render_context .command_encoder() diff --git a/crates/bevy_core_pipeline/src/skybox/mod.rs b/crates/bevy_core_pipeline/src/skybox/mod.rs index efc133651930e..11caa03afd8aa 100644 --- a/crates/bevy_core_pipeline/src/skybox/mod.rs +++ b/crates/bevy_core_pipeline/src/skybox/mod.rs @@ -10,13 +10,13 @@ use bevy_render::{ extract_component::{ExtractComponent, ExtractComponentPlugin}, render_asset::RenderAssets, render_resource::{ - BindGroup, BindGroupDescriptor, BindGroupEntry, BindGroupLayout, BindGroupLayoutDescriptor, - BindGroupLayoutEntry, BindingResource, BindingType, BufferBindingType, - CachedRenderPipelineId, ColorTargetState, ColorWrites, CompareFunction, DepthBiasState, - DepthStencilState, FragmentState, MultisampleState, PipelineCache, PrimitiveState, - RenderPipelineDescriptor, SamplerBindingType, Shader, ShaderStages, ShaderType, - SpecializedRenderPipeline, SpecializedRenderPipelines, StencilFaceState, StencilState, - TextureFormat, TextureSampleType, TextureViewDimension, VertexState, + BindGroup, BindGroupEntries, BindGroupLayout, BindGroupLayoutDescriptor, + BindGroupLayoutEntry, BindingType, BufferBindingType, CachedRenderPipelineId, + ColorTargetState, ColorWrites, CompareFunction, DepthBiasState, DepthStencilState, + FragmentState, MultisampleState, PipelineCache, PrimitiveState, RenderPipelineDescriptor, + SamplerBindingType, Shader, ShaderStages, ShaderType, SpecializedRenderPipeline, + SpecializedRenderPipelines, StencilFaceState, StencilState, TextureFormat, + TextureSampleType, TextureViewDimension, VertexState, }, renderer::RenderDevice, texture::{BevyDefault, Image}, @@ -224,24 +224,15 @@ fn prepare_skybox_bind_groups( if let (Some(skybox), Some(view_uniforms)) = (images.get(&skybox.0), view_uniforms.uniforms.binding()) { - let bind_group = render_device.create_bind_group(&BindGroupDescriptor { - label: Some("skybox_bind_group"), - layout: &pipeline.bind_group_layout, - entries: &[ - BindGroupEntry { - binding: 0, - resource: BindingResource::TextureView(&skybox.texture_view), - }, - BindGroupEntry { - binding: 1, - resource: BindingResource::Sampler(&skybox.sampler), - }, - BindGroupEntry { - binding: 2, - resource: view_uniforms, - }, - ], - }); + let bind_group = render_device.create_bind_group( + "skybox_bind_group", + &pipeline.bind_group_layout, + &BindGroupEntries::sequential(( + &skybox.texture_view, + &skybox.sampler, + view_uniforms, + )), + ); commands.entity(entity).insert(SkyboxBindGroup(bind_group)); } diff --git a/crates/bevy_core_pipeline/src/taa/mod.rs b/crates/bevy_core_pipeline/src/taa/mod.rs index e61dccc5d4bf5..28926a1b8ed0d 100644 --- a/crates/bevy_core_pipeline/src/taa/mod.rs +++ b/crates/bevy_core_pipeline/src/taa/mod.rs @@ -21,13 +21,13 @@ use bevy_render::{ prelude::{Camera, Projection}, render_graph::{NodeRunError, RenderGraphApp, RenderGraphContext, ViewNode, ViewNodeRunner}, render_resource::{ - BindGroupDescriptor, BindGroupEntry, BindGroupLayout, BindGroupLayoutDescriptor, - BindGroupLayoutEntry, BindingResource, BindingType, CachedRenderPipelineId, - ColorTargetState, ColorWrites, Extent3d, FilterMode, FragmentState, MultisampleState, - Operations, PipelineCache, PrimitiveState, RenderPassColorAttachment, RenderPassDescriptor, - RenderPipelineDescriptor, Sampler, SamplerBindingType, SamplerDescriptor, Shader, - ShaderStages, SpecializedRenderPipeline, SpecializedRenderPipelines, 
TextureDescriptor, - TextureDimension, TextureFormat, TextureSampleType, TextureUsages, TextureViewDimension, + BindGroupEntries, BindGroupLayout, BindGroupLayoutDescriptor, BindGroupLayoutEntry, + BindingType, CachedRenderPipelineId, ColorTargetState, ColorWrites, Extent3d, FilterMode, + FragmentState, MultisampleState, Operations, PipelineCache, PrimitiveState, + RenderPassColorAttachment, RenderPassDescriptor, RenderPipelineDescriptor, Sampler, + SamplerBindingType, SamplerDescriptor, Shader, ShaderStages, SpecializedRenderPipeline, + SpecializedRenderPipelines, TextureDescriptor, TextureDimension, TextureFormat, + TextureSampleType, TextureUsages, TextureViewDimension, }, renderer::{RenderContext, RenderDevice}, texture::{BevyDefault, CachedTexture, TextureCache}, @@ -197,45 +197,18 @@ impl ViewNode for TAANode { }; let view_target = view_target.post_process_write(); - let taa_bind_group = - render_context - .render_device() - .create_bind_group(&BindGroupDescriptor { - label: Some("taa_bind_group"), - layout: &pipelines.taa_bind_group_layout, - entries: &[ - BindGroupEntry { - binding: 0, - resource: BindingResource::TextureView(view_target.source), - }, - BindGroupEntry { - binding: 1, - resource: BindingResource::TextureView( - &taa_history_textures.read.default_view, - ), - }, - BindGroupEntry { - binding: 2, - resource: BindingResource::TextureView( - &prepass_motion_vectors_texture.default_view, - ), - }, - BindGroupEntry { - binding: 3, - resource: BindingResource::TextureView( - &prepass_depth_texture.default_view, - ), - }, - BindGroupEntry { - binding: 4, - resource: BindingResource::Sampler(&pipelines.nearest_sampler), - }, - BindGroupEntry { - binding: 5, - resource: BindingResource::Sampler(&pipelines.linear_sampler), - }, - ], - }); + let taa_bind_group = render_context.render_device().create_bind_group( + "taa_bind_group", + &pipelines.taa_bind_group_layout, + &BindGroupEntries::sequential(( + view_target.source, + &taa_history_textures.read.default_view, + &prepass_motion_vectors_texture.default_view, + &prepass_depth_texture.default_view, + &pipelines.nearest_sampler, + &pipelines.linear_sampler, + )), + ); { let mut taa_pass = render_context.begin_tracked_render_pass(RenderPassDescriptor { diff --git a/crates/bevy_core_pipeline/src/tonemapping/mod.rs b/crates/bevy_core_pipeline/src/tonemapping/mod.rs index 2af1d48b6eed0..6aada19c458e5 100644 --- a/crates/bevy_core_pipeline/src/tonemapping/mod.rs +++ b/crates/bevy_core_pipeline/src/tonemapping/mod.rs @@ -306,8 +306,7 @@ pub fn get_lut_bindings<'a>( images: &'a RenderAssets, tonemapping_luts: &'a TonemappingLuts, tonemapping: &Tonemapping, - bindings: [u32; 2], -) -> [BindGroupEntry<'a>; 2] { +) -> (&'a TextureView, &'a Sampler) { let image = match tonemapping { // AgX lut texture used when tonemapping doesn't need a texture since it's very small (32x32x32) Tonemapping::None @@ -320,16 +319,7 @@ pub fn get_lut_bindings<'a>( Tonemapping::BlenderFilmic => &tonemapping_luts.blender_filmic, }; let lut_image = images.get(image).unwrap(); - [ - BindGroupEntry { - binding: bindings[0], - resource: BindingResource::TextureView(&lut_image.texture_view), - }, - BindGroupEntry { - binding: bindings[1], - resource: BindingResource::Sampler(&lut_image.sampler), - }, - ] + (&lut_image.texture_view, &lut_image.sampler) } pub fn get_lut_bind_group_layout_entries(bindings: [u32; 2]) -> [BindGroupLayoutEntry; 2] { diff --git a/crates/bevy_core_pipeline/src/tonemapping/node.rs b/crates/bevy_core_pipeline/src/tonemapping/node.rs index 
e3da4aa03e417..1d1c95d970850 100644 --- a/crates/bevy_core_pipeline/src/tonemapping/node.rs +++ b/crates/bevy_core_pipeline/src/tonemapping/node.rs @@ -7,9 +7,8 @@ use bevy_render::{ render_asset::RenderAssets, render_graph::{NodeRunError, RenderGraphContext, ViewNode}, render_resource::{ - BindGroup, BindGroupDescriptor, BindGroupEntry, BindingResource, BufferId, LoadOp, - Operations, PipelineCache, RenderPassColorAttachment, RenderPassDescriptor, - SamplerDescriptor, TextureViewId, + BindGroup, BindGroupEntries, BufferId, LoadOp, Operations, PipelineCache, + RenderPassColorAttachment, RenderPassDescriptor, SamplerDescriptor, TextureViewId, }, renderer::RenderContext, texture::Image, @@ -88,36 +87,19 @@ impl ViewNode for TonemappingNode { let tonemapping_luts = world.resource::(); - let mut entries = vec![ - BindGroupEntry { - binding: 0, - resource: view_uniforms.binding().unwrap(), - }, - BindGroupEntry { - binding: 1, - resource: BindingResource::TextureView(source), - }, - BindGroupEntry { - binding: 2, - resource: BindingResource::Sampler(&sampler), - }, - ]; - - entries.extend(get_lut_bindings( - gpu_images, - tonemapping_luts, - tonemapping, - [3, 4], - )); - - let bind_group = - render_context - .render_device() - .create_bind_group(&BindGroupDescriptor { - label: None, - layout: &tonemapping_pipeline.texture_bind_group, - entries: &entries, - }); + let lut_bindings = get_lut_bindings(gpu_images, tonemapping_luts, tonemapping); + + let bind_group = render_context.render_device().create_bind_group( + None, + &tonemapping_pipeline.texture_bind_group, + &BindGroupEntries::sequential(( + view_uniforms, + source, + &sampler, + lut_bindings.0, + lut_bindings.1, + )), + ); let (_, _, bind_group) = cached_bind_group.insert((view_uniforms_id, source.id(), bind_group)); diff --git a/crates/bevy_core_pipeline/src/upscaling/node.rs b/crates/bevy_core_pipeline/src/upscaling/node.rs index 76ff1d195c998..536b2b9437515 100644 --- a/crates/bevy_core_pipeline/src/upscaling/node.rs +++ b/crates/bevy_core_pipeline/src/upscaling/node.rs @@ -4,9 +4,8 @@ use bevy_render::{ camera::{CameraOutputMode, ExtractedCamera}, render_graph::{NodeRunError, RenderGraphContext, ViewNode}, render_resource::{ - BindGroup, BindGroupDescriptor, BindGroupEntry, BindingResource, LoadOp, Operations, - PipelineCache, RenderPassColorAttachment, RenderPassDescriptor, SamplerDescriptor, - TextureViewId, + BindGroup, BindGroupEntries, LoadOp, Operations, PipelineCache, RenderPassColorAttachment, + RenderPassDescriptor, SamplerDescriptor, TextureViewId, }, renderer::RenderContext, view::ViewTarget, @@ -57,23 +56,11 @@ impl ViewNode for UpscalingNode { .render_device() .create_sampler(&SamplerDescriptor::default()); - let bind_group = - render_context - .render_device() - .create_bind_group(&BindGroupDescriptor { - label: None, - layout: &blit_pipeline.texture_bind_group, - entries: &[ - BindGroupEntry { - binding: 0, - resource: BindingResource::TextureView(upscaled_texture), - }, - BindGroupEntry { - binding: 1, - resource: BindingResource::Sampler(&sampler), - }, - ], - }); + let bind_group = render_context.render_device().create_bind_group( + None, + &blit_pipeline.texture_bind_group, + &BindGroupEntries::sequential((upscaled_texture, &sampler)), + ); let (_, bind_group) = cached_bind_group.insert((upscaled_texture.id(), bind_group)); bind_group diff --git a/crates/bevy_gizmos/src/lib.rs b/crates/bevy_gizmos/src/lib.rs index 2efaa9c914607..446605b43e4f8 100644 --- a/crates/bevy_gizmos/src/lib.rs +++ 
b/crates/bevy_gizmos/src/lib.rs @@ -52,7 +52,7 @@ use bevy_render::{ render_asset::{PrepareAssetError, RenderAsset, RenderAssetPlugin, RenderAssets}, render_phase::{PhaseItem, RenderCommand, RenderCommandResult, TrackedRenderPass}, render_resource::{ - BindGroup, BindGroupDescriptor, BindGroupEntry, BindGroupLayout, BindGroupLayoutDescriptor, + BindGroup, BindGroupEntries, BindGroupLayout, BindGroupLayoutDescriptor, BindGroupLayoutEntry, BindingType, Buffer, BufferBindingType, BufferInitDescriptor, BufferUsages, Shader, ShaderStages, ShaderType, VertexAttribute, VertexBufferLayout, VertexFormat, VertexStepMode, @@ -422,14 +422,11 @@ fn prepare_line_gizmo_bind_group( ) { if let Some(binding) = line_gizmo_uniforms.uniforms().binding() { commands.insert_resource(LineGizmoUniformBindgroup { - bindgroup: render_device.create_bind_group(&BindGroupDescriptor { - entries: &[BindGroupEntry { - binding: 0, - resource: binding, - }], - label: Some("LineGizmoUniform bindgroup"), - layout: &line_gizmo_uniform_layout.layout, - }), + bindgroup: render_device.create_bind_group( + "LineGizmoUniform bindgroup", + &line_gizmo_uniform_layout.layout, + &BindGroupEntries::single(binding), + ), }); } } diff --git a/crates/bevy_pbr/src/deferred/mod.rs b/crates/bevy_pbr/src/deferred/mod.rs index b8b61d15573da..f679f638d42ac 100644 --- a/crates/bevy_pbr/src/deferred/mod.rs +++ b/crates/bevy_pbr/src/deferred/mod.rs @@ -191,16 +191,11 @@ impl ViewNode for DeferredOpaquePass3dPbrLightingNode { return Ok(()); }; - let bind_group_1 = render_context - .render_device() - .create_bind_group(&BindGroupDescriptor { - label: Some("deferred_lighting_layout_group_1"), - layout: &deferred_lighting_layout.bind_group_layout_1, - entries: &[BindGroupEntry { - binding: 0, - resource: deferred_lighting_pass_id_binding.clone(), - }], - }); + let bind_group_1 = render_context.render_device().create_bind_group( + "deferred_lighting_layout_group_1", + &deferred_lighting_layout.bind_group_layout_1, + &BindGroupEntries::single(deferred_lighting_pass_id_binding), + ); let mut render_pass = render_context.begin_tracked_render_pass(RenderPassDescriptor { label: Some("deferred_lighting_pass"), diff --git a/crates/bevy_pbr/src/environment_map/mod.rs b/crates/bevy_pbr/src/environment_map/mod.rs index f46f0309f0cac..8cc74a212a396 100644 --- a/crates/bevy_pbr/src/environment_map/mod.rs +++ b/crates/bevy_pbr/src/environment_map/mod.rs @@ -7,8 +7,8 @@ use bevy_render::{ extract_component::{ExtractComponent, ExtractComponentPlugin}, render_asset::RenderAssets, render_resource::{ - BindGroupEntry, BindGroupLayoutEntry, BindingResource, BindingType, SamplerBindingType, - Shader, ShaderStages, TextureSampleType, TextureViewDimension, + BindGroupLayoutEntry, BindingType, Sampler, SamplerBindingType, Shader, ShaderStages, + TextureSampleType, TextureView, TextureViewDimension, }, texture::{FallbackImageCubemap, Image}, }; @@ -65,8 +65,7 @@ pub fn get_bindings<'a>( environment_map_light: Option<&EnvironmentMapLight>, images: &'a RenderAssets, fallback_image_cubemap: &'a FallbackImageCubemap, - bindings: [u32; 3], -) -> [BindGroupEntry<'a>; 3] { +) -> (&'a TextureView, &'a TextureView, &'a Sampler) { let (diffuse_map, specular_map) = match ( environment_map_light.and_then(|env_map| images.get(&env_map.diffuse_map)), environment_map_light.and_then(|env_map| images.get(&env_map.specular_map)), @@ -80,20 +79,7 @@ pub fn get_bindings<'a>( ), }; - [ - BindGroupEntry { - binding: bindings[0], - resource: BindingResource::TextureView(diffuse_map), - }, - 
BindGroupEntry { - binding: bindings[1], - resource: BindingResource::TextureView(specular_map), - }, - BindGroupEntry { - binding: bindings[2], - resource: BindingResource::Sampler(&fallback_image_cubemap.sampler), - }, - ] + (diffuse_map, specular_map, &fallback_image_cubemap.sampler) } pub fn get_bind_group_layout_entries(bindings: [u32; 3]) -> [BindGroupLayoutEntry; 3] { diff --git a/crates/bevy_pbr/src/prepass/mod.rs b/crates/bevy_pbr/src/prepass/mod.rs index 6c5b4559d045c..54155ce385a0e 100644 --- a/crates/bevy_pbr/src/prepass/mod.rs +++ b/crates/bevy_pbr/src/prepass/mod.rs @@ -35,7 +35,7 @@ use bevy_render::{ RenderPhase, SetItemPipeline, TrackedRenderPass, }, render_resource::{ - BindGroup, BindGroupDescriptor, BindGroupEntry, BindGroupLayout, BindGroupLayoutDescriptor, + BindGroup, BindGroupEntries, BindGroupLayout, BindGroupLayoutDescriptor, BindGroupLayoutEntry, BindingType, BufferBindingType, ColorTargetState, ColorWrites, CompareFunction, DepthBiasState, DepthStencilState, DynamicUniformBuffer, FragmentState, FrontFace, MultisampleState, PipelineCache, PolygonMode, PrimitiveState, PushConstantRange, @@ -713,42 +713,22 @@ pub fn prepare_prepass_view_bind_group( view_uniforms.uniforms.binding(), globals_buffer.buffer.binding(), ) { - prepass_view_bind_group.no_motion_vectors = - Some(render_device.create_bind_group(&BindGroupDescriptor { - entries: &[ - BindGroupEntry { - binding: 0, - resource: view_binding.clone(), - }, - BindGroupEntry { - binding: 1, - resource: globals_binding.clone(), - }, - ], - label: Some("prepass_view_no_motion_vectors_bind_group"), - layout: &prepass_pipeline.view_layout_no_motion_vectors, - })); + prepass_view_bind_group.no_motion_vectors = Some(render_device.create_bind_group( + "prepass_view_no_motion_vectors_bind_group", + &prepass_pipeline.view_layout_no_motion_vectors, + &BindGroupEntries::sequential((view_binding.clone(), globals_binding.clone())), + )); if let Some(previous_view_proj_binding) = previous_view_proj_uniforms.uniforms.binding() { - prepass_view_bind_group.motion_vectors = - Some(render_device.create_bind_group(&BindGroupDescriptor { - entries: &[ - BindGroupEntry { - binding: 0, - resource: view_binding, - }, - BindGroupEntry { - binding: 1, - resource: globals_binding, - }, - BindGroupEntry { - binding: 2, - resource: previous_view_proj_binding, - }, - ], - label: Some("prepass_view_motion_vectors_bind_group"), - layout: &prepass_pipeline.view_layout_motion_vectors, - })); + prepass_view_bind_group.motion_vectors = Some(render_device.create_bind_group( + "prepass_view_motion_vectors_bind_group", + &prepass_pipeline.view_layout_motion_vectors, + &BindGroupEntries::sequential(( + view_binding, + globals_binding, + previous_view_proj_binding, + )), + )); } } } diff --git a/crates/bevy_pbr/src/prepass/prepass_bindings.rs b/crates/bevy_pbr/src/prepass/prepass_bindings.rs index acbf80ceabde1..b72ddd1e318cd 100644 --- a/crates/bevy_pbr/src/prepass/prepass_bindings.rs +++ b/crates/bevy_pbr/src/prepass/prepass_bindings.rs @@ -1,7 +1,7 @@ use bevy_core_pipeline::prepass::ViewPrepassTextures; use bevy_render::render_resource::{ - BindGroupEntry, BindGroupLayoutEntry, BindingResource, BindingType, ShaderStages, - TextureAspect, TextureSampleType, TextureView, TextureViewDescriptor, TextureViewDimension, + BindGroupLayoutEntry, BindingType, ShaderStages, TextureAspect, TextureSampleType, TextureView, + TextureViewDescriptor, TextureViewDimension, }; use bevy_utils::default; use smallvec::SmallVec; @@ -83,51 +83,7 @@ pub fn 
get_bind_group_layout_entries( result } -// Needed so the texture views can live long enough. -pub struct PrepassBindingsSet { - depth_view: Option, - normal_view: Option, - motion_vectors_view: Option, - deferred_view: Option, -} - -impl PrepassBindingsSet { - pub fn get_entries(&self, bindings: [u32; 4]) -> SmallVec<[BindGroupEntry; 4]> { - let mut result = SmallVec::<[BindGroupEntry; 4]>::new(); - - if let Some(ref depth_view) = self.depth_view { - result.push(BindGroupEntry { - binding: bindings[0], - resource: BindingResource::TextureView(depth_view), - }); - } - - if let Some(ref normal_view) = self.normal_view { - result.push(BindGroupEntry { - binding: bindings[1], - resource: BindingResource::TextureView(normal_view), - }); - } - - if let Some(ref motion_vectors_view) = self.motion_vectors_view { - result.push(BindGroupEntry { - binding: bindings[2], - resource: BindingResource::TextureView(motion_vectors_view), - }); - } - - if let Some(ref deferred_view) = self.deferred_view { - result.push(BindGroupEntry { - binding: bindings[3], - resource: BindingResource::TextureView(deferred_view), - }); - } - - result - } -} - -pub fn get_bindings(prepass_textures: Option<&ViewPrepassTextures>) -> PrepassBindingsSet { +pub fn get_bindings(prepass_textures: Option<&ViewPrepassTextures>) -> [Option; 4] { let depth_desc = TextureViewDescriptor { label: Some("prepass_depth"), aspect: TextureAspect::DepthOnly, @@ -149,10 +105,5 @@ pub fn get_bindings(prepass_textures: Option<&ViewPrepassTextures>) -> PrepassBi .and_then(|x| x.deferred.as_ref()) .map(|texture| texture.default_view.clone()); - PrepassBindingsSet { - depth_view, - normal_view, - motion_vectors_view, - deferred_view, - } + [depth_view, normal_view, motion_vectors_view, deferred_view] } diff --git a/crates/bevy_pbr/src/render/mesh_bindings.rs b/crates/bevy_pbr/src/render/mesh_bindings.rs index dcc01e1aa4c8b..bf45fd12b8c83 100644 --- a/crates/bevy_pbr/src/render/mesh_bindings.rs +++ b/crates/bevy_pbr/src/render/mesh_bindings.rs @@ -4,8 +4,7 @@ use bevy_math::Mat4; use bevy_render::{ mesh::morph::MAX_MORPH_WEIGHTS, render_resource::{ - BindGroup, BindGroupDescriptor, BindGroupLayout, BindGroupLayoutDescriptor, - BindingResource, Buffer, TextureView, + BindGroup, BindGroupLayout, BindGroupLayoutDescriptor, BindingResource, Buffer, TextureView, }, renderer::RenderDevice, }; @@ -179,11 +178,11 @@ impl MeshLayouts { // ---------- BindGroup methods ---------- pub fn model_only(&self, render_device: &RenderDevice, model: &BindingResource) -> BindGroup { - render_device.create_bind_group(&BindGroupDescriptor { - entries: &[entry::model(0, model.clone())], - layout: &self.model_only, - label: Some("model_only_mesh_bind_group"), - }) + render_device.create_bind_group( + "model_only_mesh_bind_group", + &self.model_only, + &[entry::model(0, model.clone())], + ) } pub fn skinned( &self, @@ -191,11 +190,11 @@ impl MeshLayouts { model: &BindingResource, skin: &Buffer, ) -> BindGroup { - render_device.create_bind_group(&BindGroupDescriptor { - entries: &[entry::model(0, model.clone()), entry::skinning(1, skin)], - layout: &self.skinned, - label: Some("skinned_mesh_bind_group"), - }) + render_device.create_bind_group( + "skinned_mesh_bind_group", + &self.skinned, + &[entry::model(0, model.clone()), entry::skinning(1, skin)], + ) } pub fn morphed( &self, @@ -204,15 +203,15 @@ impl MeshLayouts { weights: &Buffer, targets: &TextureView, ) -> BindGroup { - render_device.create_bind_group(&BindGroupDescriptor { - entries: &[ + 
render_device.create_bind_group( + "morphed_mesh_bind_group", + &self.morphed, + &[ entry::model(0, model.clone()), entry::weights(2, weights), entry::targets(3, targets), ], - layout: &self.morphed, - label: Some("morphed_mesh_bind_group"), - }) + ) } pub fn morphed_skinned( &self, @@ -222,15 +221,15 @@ impl MeshLayouts { weights: &Buffer, targets: &TextureView, ) -> BindGroup { - render_device.create_bind_group(&BindGroupDescriptor { - entries: &[ + render_device.create_bind_group( + "morphed_skinned_mesh_bind_group", + &self.morphed_skinned, + &[ entry::model(0, model.clone()), entry::skinning(1, skin), entry::weights(2, weights), entry::targets(3, targets), ], - layout: &self.morphed_skinned, - label: Some("morphed_skinned_mesh_bind_group"), - }) + ) } } diff --git a/crates/bevy_pbr/src/render/mesh_view_bindings.rs b/crates/bevy_pbr/src/render/mesh_view_bindings.rs index 8ed474769525f..ba66561c6d785 100644 --- a/crates/bevy_pbr/src/render/mesh_view_bindings.rs +++ b/crates/bevy_pbr/src/render/mesh_view_bindings.rs @@ -15,9 +15,9 @@ use bevy_render::{ globals::{GlobalsBuffer, GlobalsUniform}, render_asset::RenderAssets, render_resource::{ - BindGroup, BindGroupDescriptor, BindGroupEntry, BindGroupLayout, BindGroupLayoutDescriptor, - BindGroupLayoutEntry, BindingResource, BindingType, BufferBindingType, SamplerBindingType, - ShaderStages, ShaderType, TextureFormat, TextureSampleType, TextureViewDimension, + BindGroup, BindGroupLayout, BindGroupLayoutDescriptor, BindGroupLayoutEntry, BindingType, + BufferBindingType, DynamicBindGroupEntries, SamplerBindingType, ShaderStages, ShaderType, + TextureFormat, TextureSampleType, TextureViewDimension, }, renderer::RenderDevice, texture::{BevyDefault, FallbackImageCubemap, FallbackImageMsaa, Image}, @@ -383,8 +383,8 @@ pub fn prepare_mesh_view_bind_groups( ) { for ( entity, - view_shadow_bindings, - view_cluster_bindings, + shadow_bindings, + cluster_bindings, ssao_textures, prepass_textures, environment_map, @@ -395,108 +395,58 @@ pub fn prepare_mesh_view_bind_groups( .image_for_samplecount(1, TextureFormat::bevy_default()) .texture_view .clone(); + let ssao_view = ssao_textures + .map(|t| &t.screen_space_ambient_occlusion_texture.default_view) + .unwrap_or(&fallback_ssao); let layout = &mesh_pipeline.get_view_layout( MeshPipelineViewLayoutKey::from(*msaa) | MeshPipelineViewLayoutKey::from(prepass_textures), ); - let mut entries = vec![ - BindGroupEntry { - binding: 0, - resource: view_binding.clone(), - }, - BindGroupEntry { - binding: 1, - resource: light_binding.clone(), - }, - BindGroupEntry { - binding: 2, - resource: BindingResource::TextureView( - &view_shadow_bindings.point_light_depth_texture_view, - ), - }, - BindGroupEntry { - binding: 3, - resource: BindingResource::Sampler(&shadow_samplers.point_light_sampler), - }, - BindGroupEntry { - binding: 4, - resource: BindingResource::TextureView( - &view_shadow_bindings.directional_light_depth_texture_view, - ), - }, - BindGroupEntry { - binding: 5, - resource: BindingResource::Sampler(&shadow_samplers.directional_light_sampler), - }, - BindGroupEntry { - binding: 6, - resource: point_light_binding.clone(), - }, - BindGroupEntry { - binding: 7, - resource: view_cluster_bindings.light_index_lists_binding().unwrap(), - }, - BindGroupEntry { - binding: 8, - resource: view_cluster_bindings.offsets_and_counts_binding().unwrap(), - }, - BindGroupEntry { - binding: 9, - resource: globals.clone(), - }, - BindGroupEntry { - binding: 10, - resource: fog_binding.clone(), - }, - BindGroupEntry { - 
binding: 11, - resource: BindingResource::TextureView( - ssao_textures - .map(|t| &t.screen_space_ambient_occlusion_texture.default_view) - .unwrap_or(&fallback_ssao), - ), - }, - ]; - - let env_map = environment_map::get_bindings( - environment_map, - &images, - &fallback_cubemap, - [12, 13, 14], - ); - entries.extend_from_slice(&env_map); - - let tonemapping_luts = - get_lut_bindings(&images, &tonemapping_luts, tonemapping, [15, 16]); - entries.extend_from_slice(&tonemapping_luts); - - let label = Some("mesh_view_bind_group"); + let mut entries = DynamicBindGroupEntries::new_with_indices(( + (0, view_binding.clone()), + (1, light_binding.clone()), + (2, &shadow_bindings.point_light_depth_texture_view), + (3, &shadow_samplers.point_light_sampler), + (4, &shadow_bindings.directional_light_depth_texture_view), + (5, &shadow_samplers.directional_light_sampler), + (6, point_light_binding.clone()), + (7, cluster_bindings.light_index_lists_binding().unwrap()), + (8, cluster_bindings.offsets_and_counts_binding().unwrap()), + (9, globals.clone()), + (10, fog_binding.clone()), + (11, ssao_view), + )); + + let env_map_bindings = + environment_map::get_bindings(environment_map, &images, &fallback_cubemap); + entries = entries.extend_with_indices(( + (12, env_map_bindings.0), + (13, env_map_bindings.1), + (14, env_map_bindings.2), + )); + + let lut_bindings = get_lut_bindings(&images, &tonemapping_luts, tonemapping); + entries = entries.extend_with_indices(((15, lut_bindings.0), (16, lut_bindings.1))); // When using WebGL, we can't have a depth texture with multisampling - let prepass_bindings = if cfg!(any(not(feature = "webgl"), not(target_arch = "wasm32"))) - || (cfg!(all(feature = "webgl", target_arch = "wasm32")) && msaa.samples() == 1) + let prepass_bindings; + if cfg!(any(not(feature = "webgl"), not(target_arch = "wasm32"))) || msaa.samples() == 1 { - Some(prepass::get_bindings(prepass_textures)) - } else { - None - }; - - // This if statement is here to make the borrow checker happy. - // Ideally we could just have `entries.extend_from_slice(&prepass_bindings.get_entries([17, 18, 19, 20]));` - // in the existing if statement above, but that either doesn't allow `prepass_bindings` to live long enough, - // as its used when creating the bind group at the end of the function, or causes a `cannot move out of` error. 
- if let Some(prepass_bindings) = &prepass_bindings { - entries.extend_from_slice(&prepass_bindings.get_entries([17, 18, 19, 20])); + prepass_bindings = prepass::get_bindings(prepass_textures); + for (binding, index) in prepass_bindings + .iter() + .map(Option::as_ref) + .zip([17, 18, 19, 20]) + .flat_map(|(b, i)| b.map(|b| (b, i))) + { + entries = entries.extend_with_indices(((index, binding),)); + } } commands.entity(entity).insert(MeshViewBindGroup { - value: render_device.create_bind_group(&BindGroupDescriptor { - entries: &entries, - label, - layout, - }), + value: render_device.create_bind_group("mesh_view_bind_group", layout, &entries), }); } } diff --git a/crates/bevy_pbr/src/ssao/mod.rs b/crates/bevy_pbr/src/ssao/mod.rs index eaabea3772654..59fb57cfac3c6 100644 --- a/crates/bevy_pbr/src/ssao/mod.rs +++ b/crates/bevy_pbr/src/ssao/mod.rs @@ -21,12 +21,11 @@ use bevy_render::{ prelude::Camera, render_graph::{NodeRunError, RenderGraphApp, RenderGraphContext, ViewNode, ViewNodeRunner}, render_resource::{ - AddressMode, BindGroup, BindGroupDescriptor, BindGroupEntry, BindGroupLayout, - BindGroupLayoutDescriptor, BindGroupLayoutEntry, BindingResource, BindingType, - BufferBindingType, CachedComputePipelineId, ComputePassDescriptor, - ComputePipelineDescriptor, Extent3d, FilterMode, PipelineCache, Sampler, - SamplerBindingType, SamplerDescriptor, Shader, ShaderDefVal, ShaderStages, ShaderType, - SpecializedComputePipeline, SpecializedComputePipelines, StorageTextureAccess, + AddressMode, BindGroup, BindGroupEntries, BindGroupLayout, BindGroupLayoutDescriptor, + BindGroupLayoutEntry, BindingType, BufferBindingType, CachedComputePipelineId, + ComputePassDescriptor, ComputePipelineDescriptor, Extent3d, FilterMode, PipelineCache, + Sampler, SamplerBindingType, SamplerDescriptor, Shader, ShaderDefVal, ShaderStages, + ShaderType, SpecializedComputePipeline, SpecializedComputePipelines, StorageTextureAccess, TextureDescriptor, TextureDimension, TextureFormat, TextureSampleType, TextureUsages, TextureView, TextureViewDescriptor, TextureViewDimension, }, @@ -776,171 +775,63 @@ fn prepare_ssao_bind_groups( }; for (entity, ssao_textures, prepass_textures) in &views { - let common_bind_group = render_device.create_bind_group(&BindGroupDescriptor { - label: Some("ssao_common_bind_group"), - layout: &pipelines.common_bind_group_layout, - entries: &[ - BindGroupEntry { - binding: 0, - resource: BindingResource::Sampler(&pipelines.point_clamp_sampler), - }, - BindGroupEntry { - binding: 1, - resource: view_uniforms.clone(), - }, - ], - }); + let common_bind_group = render_device.create_bind_group( + "ssao_common_bind_group", + &pipelines.common_bind_group_layout, + &BindGroupEntries::sequential((&pipelines.point_clamp_sampler, view_uniforms.clone())), + ); - let preprocess_depth_mip_view_descriptor = TextureViewDescriptor { - format: Some(TextureFormat::R16Float), - dimension: Some(TextureViewDimension::D2), - mip_level_count: Some(1), - ..default() + let create_depth_view = |mip_level| { + ssao_textures + .preprocessed_depth_texture + .texture + .create_view(&TextureViewDescriptor { + label: Some("ssao_preprocessed_depth_texture_mip_view"), + base_mip_level: mip_level, + format: Some(TextureFormat::R16Float), + dimension: Some(TextureViewDimension::D2), + mip_level_count: Some(1), + ..default() + }) }; - let preprocess_depth_bind_group = render_device.create_bind_group(&BindGroupDescriptor { - label: Some("ssao_preprocess_depth_bind_group"), - layout: &pipelines.preprocess_depth_bind_group_layout, - 
entries: &[ - BindGroupEntry { - binding: 0, - resource: BindingResource::TextureView( - &prepass_textures.depth.as_ref().unwrap().default_view, - ), - }, - BindGroupEntry { - binding: 1, - resource: BindingResource::TextureView( - &ssao_textures - .preprocessed_depth_texture - .texture - .create_view(&TextureViewDescriptor { - label: Some("ssao_preprocessed_depth_texture_mip_view_0"), - base_mip_level: 0, - ..preprocess_depth_mip_view_descriptor - }), - ), - }, - BindGroupEntry { - binding: 2, - resource: BindingResource::TextureView( - &ssao_textures - .preprocessed_depth_texture - .texture - .create_view(&TextureViewDescriptor { - label: Some("ssao_preprocessed_depth_texture_mip_view_1"), - base_mip_level: 1, - ..preprocess_depth_mip_view_descriptor - }), - ), - }, - BindGroupEntry { - binding: 3, - resource: BindingResource::TextureView( - &ssao_textures - .preprocessed_depth_texture - .texture - .create_view(&TextureViewDescriptor { - label: Some("ssao_preprocessed_depth_texture_mip_view_2"), - base_mip_level: 2, - ..preprocess_depth_mip_view_descriptor - }), - ), - }, - BindGroupEntry { - binding: 4, - resource: BindingResource::TextureView( - &ssao_textures - .preprocessed_depth_texture - .texture - .create_view(&TextureViewDescriptor { - label: Some("ssao_preprocessed_depth_texture_mip_view_3"), - base_mip_level: 3, - ..preprocess_depth_mip_view_descriptor - }), - ), - }, - BindGroupEntry { - binding: 5, - resource: BindingResource::TextureView( - &ssao_textures - .preprocessed_depth_texture - .texture - .create_view(&TextureViewDescriptor { - label: Some("ssao_preprocessed_depth_texture_mip_view_4"), - base_mip_level: 4, - ..preprocess_depth_mip_view_descriptor - }), - ), - }, - ], - }); + let preprocess_depth_bind_group = render_device.create_bind_group( + "ssao_preprocess_depth_bind_group", + &pipelines.preprocess_depth_bind_group_layout, + &BindGroupEntries::sequential(( + &prepass_textures.depth.as_ref().unwrap().default_view, + &create_depth_view(0), + &create_depth_view(1), + &create_depth_view(2), + &create_depth_view(3), + &create_depth_view(4), + )), + ); - let gtao_bind_group = render_device.create_bind_group(&BindGroupDescriptor { - label: Some("ssao_gtao_bind_group"), - layout: &pipelines.gtao_bind_group_layout, - entries: &[ - BindGroupEntry { - binding: 0, - resource: BindingResource::TextureView( - &ssao_textures.preprocessed_depth_texture.default_view, - ), - }, - BindGroupEntry { - binding: 1, - resource: BindingResource::TextureView( - &prepass_textures.normal.as_ref().unwrap().default_view, - ), - }, - BindGroupEntry { - binding: 2, - resource: BindingResource::TextureView(&pipelines.hilbert_index_lut), - }, - BindGroupEntry { - binding: 3, - resource: BindingResource::TextureView( - &ssao_textures.ssao_noisy_texture.default_view, - ), - }, - BindGroupEntry { - binding: 4, - resource: BindingResource::TextureView( - &ssao_textures.depth_differences_texture.default_view, - ), - }, - BindGroupEntry { - binding: 5, - resource: globals_uniforms.clone(), - }, - ], - }); + let gtao_bind_group = render_device.create_bind_group( + "ssao_gtao_bind_group", + &pipelines.gtao_bind_group_layout, + &BindGroupEntries::sequential(( + &ssao_textures.preprocessed_depth_texture.default_view, + &prepass_textures.normal.as_ref().unwrap().default_view, + &pipelines.hilbert_index_lut, + &ssao_textures.ssao_noisy_texture.default_view, + &ssao_textures.depth_differences_texture.default_view, + globals_uniforms.clone(), + )), + ); - let spatial_denoise_bind_group = 
render_device.create_bind_group(&BindGroupDescriptor { - label: Some("ssao_spatial_denoise_bind_group"), - layout: &pipelines.spatial_denoise_bind_group_layout, - entries: &[ - BindGroupEntry { - binding: 0, - resource: BindingResource::TextureView( - &ssao_textures.ssao_noisy_texture.default_view, - ), - }, - BindGroupEntry { - binding: 1, - resource: BindingResource::TextureView( - &ssao_textures.depth_differences_texture.default_view, - ), - }, - BindGroupEntry { - binding: 2, - resource: BindingResource::TextureView( - &ssao_textures - .screen_space_ambient_occlusion_texture - .default_view, - ), - }, - ], - }); + let spatial_denoise_bind_group = render_device.create_bind_group( + "ssao_spatial_denoise_bind_group", + &pipelines.spatial_denoise_bind_group_layout, + &BindGroupEntries::sequential(( + &ssao_textures.ssao_noisy_texture.default_view, + &ssao_textures.depth_differences_texture.default_view, + &ssao_textures + .screen_space_ambient_occlusion_texture + .default_view, + )), + ); commands.entity(entity).insert(SsaoBindGroups { common_bind_group, diff --git a/crates/bevy_render/src/render_resource/bind_group.rs b/crates/bevy_render/src/render_resource/bind_group.rs index c4de9cb1b6089..8ee876b9c5208 100644 --- a/crates/bevy_render/src/render_resource/bind_group.rs +++ b/crates/bevy_render/src/render_resource/bind_group.rs @@ -9,10 +9,7 @@ use crate::{ pub use bevy_render_macros::AsBindGroup; use encase::ShaderType; use std::ops::Deref; -use wgpu::{ - BindGroupDescriptor, BindGroupEntry, BindGroupLayoutDescriptor, BindGroupLayoutEntry, - BindingResource, -}; +use wgpu::{BindGroupEntry, BindGroupLayoutDescriptor, BindGroupLayoutEntry, BindingResource}; define_atomic_id!(BindGroupId); render_resource_wrapper!(ErasedBindGroup, wgpu::BindGroup); @@ -289,11 +286,7 @@ pub trait AsBindGroup { }) .collect::>(); - let bind_group = render_device.create_bind_group(&BindGroupDescriptor { - label: Self::label(), - layout, - entries: &entries, - }); + let bind_group = render_device.create_bind_group(Self::label(), layout, &entries); Ok(PreparedBindGroup { bindings, diff --git a/crates/bevy_render/src/render_resource/bind_group_entries.rs b/crates/bevy_render/src/render_resource/bind_group_entries.rs new file mode 100644 index 0000000000000..09336eeb0a093 --- /dev/null +++ b/crates/bevy_render/src/render_resource/bind_group_entries.rs @@ -0,0 +1,282 @@ +use bevy_utils::all_tuples_with_size; +use wgpu::{BindGroupEntry, BindingResource}; + +use super::{Sampler, TextureView}; + +/// Helper for constructing bindgroups. 
+/// +/// Allows constructing the descriptor's entries as: +/// ```ignore +/// render_device.create_bind_group( +/// "my_bind_group", +/// &my_layout, +/// &BindGroupEntries::with_indices(( +/// (2, &my_sampler), +/// (3, my_uniform), +/// )), +/// ); +/// ``` +/// +/// instead of +/// +/// ```ignore +/// render_device.create_bind_group( +/// "my_bind_group", +/// &my_layout, +/// &[ +/// BindGroupEntry { +/// binding: 2, +/// resource: BindingResource::Sampler(&my_sampler), +/// }, +/// BindGroupEntry { +/// binding: 3, +/// resource: my_uniform, +/// }, +/// ], +/// ); +/// ``` +/// +/// or +/// +/// ```ignore +/// render_device.create_bind_group( +/// "my_bind_group", +/// &my_layout, +/// &BindGroupEntries::sequential(( +/// &my_sampler, +/// my_uniform, +/// )), +/// ); +/// ``` +/// +/// instead of +/// +/// ```ignore +/// render_device.create_bind_group( +/// "my_bind_group", +/// &my_layout, +/// &[ +/// BindGroupEntry { +/// binding: 0, +/// resource: BindingResource::Sampler(&my_sampler), +/// }, +/// BindGroupEntry { +/// binding: 1, +/// resource: my_uniform, +/// }, +/// ], +/// ); +/// ``` +/// +/// or +/// +/// ```ignore +/// render_device.create_bind_group( +/// "my_bind_group", +/// &my_layout, +/// &BindGroupEntries::single(my_uniform), +/// ); +/// ``` +/// +/// instead of +/// +/// ```ignore +/// render_device.create_bind_group( +/// "my_bind_group", +/// &my_layout, +/// &[ +/// BindGroupEntry { +/// binding: 0, +/// resource: my_uniform, +/// }, +/// ], +/// ); +/// ``` + +pub struct BindGroupEntries<'b, const N: usize = 1> { + entries: [BindGroupEntry<'b>; N], +} + +impl<'b, const N: usize> BindGroupEntries<'b, N> { + #[inline] + pub fn sequential(resources: impl IntoBindingArray<'b, N>) -> Self { + let mut i = 0; + Self { + entries: resources.into_array().map(|resource| { + let binding = i; + i += 1; + BindGroupEntry { binding, resource } + }), + } + } + + #[inline] + pub fn with_indices(indexed_resources: impl IntoIndexedBindingArray<'b, N>) -> Self { + Self { + entries: indexed_resources + .into_array() + .map(|(binding, resource)| BindGroupEntry { binding, resource }), + } + } +} + +impl<'b> BindGroupEntries<'b, 1> { + pub fn single(resource: impl IntoBinding<'b>) -> [BindGroupEntry<'b>; 1] { + [BindGroupEntry { + binding: 0, + resource: resource.into_binding(), + }] + } +} + +impl<'b, const N: usize> std::ops::Deref for BindGroupEntries<'b, N> { + type Target = [BindGroupEntry<'b>]; + + fn deref(&self) -> &[BindGroupEntry<'b>] { + &self.entries + } +} + +pub trait IntoBinding<'a> { + fn into_binding(self) -> BindingResource<'a>; +} + +impl<'a> IntoBinding<'a> for &'a TextureView { + #[inline] + fn into_binding(self) -> BindingResource<'a> { + BindingResource::TextureView(self) + } +} + +impl<'a> IntoBinding<'a> for &'a [&'a wgpu::TextureView] { + #[inline] + fn into_binding(self) -> BindingResource<'a> { + BindingResource::TextureViewArray(self) + } +} + +impl<'a> IntoBinding<'a> for &'a Sampler { + #[inline] + fn into_binding(self) -> BindingResource<'a> { + BindingResource::Sampler(self) + } +} + +impl<'a> IntoBinding<'a> for BindingResource<'a> { + #[inline] + fn into_binding(self) -> BindingResource<'a> { + self + } +} + +impl<'a> IntoBinding<'a> for wgpu::BufferBinding<'a> { + #[inline] + fn into_binding(self) -> BindingResource<'a> { + BindingResource::Buffer(self) + } +} + +pub trait IntoBindingArray<'b, const N: usize> { + fn into_array(self) -> [BindingResource<'b>; N]; +} + +macro_rules! 
impl_to_binding_slice { + ($N: expr, $(($T: ident, $I: ident)),*) => { + impl<'b, $($T: IntoBinding<'b>),*> IntoBindingArray<'b, $N> for ($($T,)*) { + #[inline] + fn into_array(self) -> [BindingResource<'b>; $N] { + let ($($I,)*) = self; + [$($I.into_binding(), )*] + } + } + } +} + +all_tuples_with_size!(impl_to_binding_slice, 1, 32, T, s); + +pub trait IntoIndexedBindingArray<'b, const N: usize> { + fn into_array(self) -> [(u32, BindingResource<'b>); N]; +} + +macro_rules! impl_to_indexed_binding_slice { + ($N: expr, $(($T: ident, $S: ident, $I: ident)),*) => { + impl<'b, $($T: IntoBinding<'b>),*> IntoIndexedBindingArray<'b, $N> for ($((u32, $T),)*) { + #[inline] + fn into_array(self) -> [(u32, BindingResource<'b>); $N] { + let ($(($S, $I),)*) = self; + [$(($S, $I.into_binding())), *] + } + } + } +} + +all_tuples_with_size!(impl_to_indexed_binding_slice, 1, 32, T, n, s); + +pub struct DynamicBindGroupEntries<'b> { + entries: Vec>, +} + +impl<'b> DynamicBindGroupEntries<'b> { + pub fn sequential(entries: impl IntoBindingArray<'b, N>) -> Self { + Self { + entries: entries + .into_array() + .into_iter() + .enumerate() + .map(|(ix, resource)| BindGroupEntry { + binding: ix as u32, + resource, + }) + .collect(), + } + } + + pub fn extend_sequential( + mut self, + entries: impl IntoBindingArray<'b, N>, + ) -> Self { + let start = self.entries.last().unwrap().binding + 1; + self.entries.extend( + entries + .into_array() + .into_iter() + .enumerate() + .map(|(ix, resource)| BindGroupEntry { + binding: start + ix as u32, + resource, + }), + ); + self + } + + pub fn new_with_indices(entries: impl IntoIndexedBindingArray<'b, N>) -> Self { + Self { + entries: entries + .into_array() + .into_iter() + .map(|(binding, resource)| BindGroupEntry { binding, resource }) + .collect(), + } + } + + pub fn extend_with_indices( + mut self, + entries: impl IntoIndexedBindingArray<'b, N>, + ) -> Self { + self.entries.extend( + entries + .into_array() + .into_iter() + .map(|(binding, resource)| BindGroupEntry { binding, resource }), + ); + self + } +} + +impl<'b> std::ops::Deref for DynamicBindGroupEntries<'b> { + type Target = [BindGroupEntry<'b>]; + + fn deref(&self) -> &[BindGroupEntry<'b>] { + &self.entries + } +} diff --git a/crates/bevy_render/src/render_resource/mod.rs b/crates/bevy_render/src/render_resource/mod.rs index f16f5f1269929..b7d245b0bdbc9 100644 --- a/crates/bevy_render/src/render_resource/mod.rs +++ b/crates/bevy_render/src/render_resource/mod.rs @@ -1,5 +1,6 @@ mod batched_uniform_buffer; mod bind_group; +mod bind_group_entries; mod bind_group_layout; mod buffer; mod buffer_vec; @@ -14,6 +15,7 @@ mod texture; mod uniform_buffer; pub use bind_group::*; +pub use bind_group_entries::*; pub use bind_group_layout::*; pub use buffer::*; pub use buffer_vec::*; diff --git a/crates/bevy_render/src/render_resource/uniform_buffer.rs b/crates/bevy_render/src/render_resource/uniform_buffer.rs index 3ecd692ba2880..95196568ff20a 100644 --- a/crates/bevy_render/src/render_resource/uniform_buffer.rs +++ b/crates/bevy_render/src/render_resource/uniform_buffer.rs @@ -13,6 +13,8 @@ use wgpu::{ util::BufferInitDescriptor, BindingResource, BufferBinding, BufferDescriptor, BufferUsages, }; +use super::IntoBinding; + /// Stores data to be transferred to the GPU and made accessible to shaders as a uniform buffer. /// /// Uniform buffers are available to shaders on a read-only basis. 
Uniform buffers are commonly used to make available to shaders @@ -139,6 +141,16 @@ impl<T: ShaderType + WriteInto> UniformBuffer<T> { } } +impl<'a, T: ShaderType + WriteInto> IntoBinding<'a> for &'a UniformBuffer<T> { + #[inline] + fn into_binding(self) -> BindingResource<'a> { + self.buffer() + .expect("Failed to get buffer") + .as_entire_buffer_binding() + .into_binding() + } +} + /// Stores data to be transferred to the GPU and made accessible to shaders as a dynamic uniform buffer. /// /// Dynamic uniform buffers are available to shaders on a read-only basis. Dynamic uniform buffers are commonly used to make @@ -367,3 +379,10 @@ impl<'a> BufferMut for QueueWriteBufferViewWrapper<'a> { self.buffer_view.write(offset, val); } } + +impl<'a, T: ShaderType + WriteInto> IntoBinding<'a> for &'a DynamicUniformBuffer<T> { + #[inline] + fn into_binding(self) -> BindingResource<'a> { + self.binding().unwrap() + } +} diff --git a/crates/bevy_render/src/renderer/render_device.rs b/crates/bevy_render/src/renderer/render_device.rs index 8a177e94774cb..6a126df8aa41e 100644 --- a/crates/bevy_render/src/renderer/render_device.rs +++ b/crates/bevy_render/src/renderer/render_device.rs @@ -3,7 +3,9 @@ use crate::render_resource::{ RenderPipeline, Sampler, Texture, }; use bevy_ecs::system::Resource; -use wgpu::{util::DeviceExt, BufferAsyncError, BufferBindingType}; +use wgpu::{ + util::DeviceExt, BindGroupDescriptor, BindGroupEntry, BufferAsyncError, BufferBindingType, +}; use super::RenderQueue; @@ -82,8 +84,17 @@ impl RenderDevice { /// Creates a new [`BindGroup`](wgpu::BindGroup). #[inline] - pub fn create_bind_group(&self, desc: &wgpu::BindGroupDescriptor) -> BindGroup { - let wgpu_bind_group = self.device.create_bind_group(desc); + pub fn create_bind_group<'a>( + &self, + label: impl Into<wgpu::Label<'a>>, + layout: &'a BindGroupLayout, + entries: &'a [BindGroupEntry<'a>], + ) -> BindGroup { + let wgpu_bind_group = self.device.create_bind_group(&BindGroupDescriptor { + label: label.into(), + layout, + entries, + }); BindGroup::from(wgpu_bind_group) } diff --git a/crates/bevy_render/src/view/window/mod.rs b/crates/bevy_render/src/view/window/mod.rs index 37729f3a253ed..31ac4fca2cee3 100644 --- a/crates/bevy_render/src/view/window/mod.rs +++ b/crates/bevy_render/src/view/window/mod.rs @@ -1,5 +1,7 @@ use crate::{ - render_resource::{PipelineCache, SpecializedRenderPipelines, SurfaceTexture, TextureView}, + render_resource::{ + BindGroupEntries, PipelineCache, SpecializedRenderPipelines, SurfaceTexture, TextureView, + }, renderer::{RenderAdapter, RenderDevice, RenderInstance}, texture::TextureFormatPixelInfo, Extract, ExtractSchedule, Render, RenderApp, RenderSet, @@ -413,14 +415,11 @@ pub fn prepare_windows( usage: BufferUsages::MAP_READ | BufferUsages::COPY_DST, mapped_at_creation: false, }); - let bind_group = render_device.create_bind_group(&wgpu::BindGroupDescriptor { - label: Some("screenshot-to-screen-bind-group"), - layout: &screenshot_pipeline.bind_group_layout, - entries: &[wgpu::BindGroupEntry { - binding: 0, - resource: wgpu::BindingResource::TextureView(&texture_view), - }], - }); + let bind_group = render_device.create_bind_group( + "screenshot-to-screen-bind-group", + &screenshot_pipeline.bind_group_layout, + &BindGroupEntries::single(&texture_view), + ); let pipeline_id = pipelines.specialize( &pipeline_cache, &screenshot_pipeline, diff --git a/crates/bevy_sprite/src/mesh2d/mesh.rs b/crates/bevy_sprite/src/mesh2d/mesh.rs index 5f62000bc441e..df5e4f0594011 100644 --- a/crates/bevy_sprite/src/mesh2d/mesh.rs +++ 
b/crates/bevy_sprite/src/mesh2d/mesh.rs @@ -596,14 +596,11 @@ pub fn prepare_mesh2d_bind_group( ) { if let Some(binding) = mesh2d_uniforms.binding() { commands.insert_resource(Mesh2dBindGroup { - value: render_device.create_bind_group(&BindGroupDescriptor { - entries: &[BindGroupEntry { - binding: 0, - resource: binding, - }], - label: Some("mesh2d_bind_group"), - layout: &mesh2d_pipeline.mesh_layout, - }), + value: render_device.create_bind_group( + "mesh2d_bind_group", + &mesh2d_pipeline.mesh_layout, + &BindGroupEntries::single(binding), + ), }); } } @@ -626,20 +623,11 @@ pub fn prepare_mesh2d_view_bind_groups( globals_buffer.buffer.binding(), ) { for entity in &views { - let view_bind_group = render_device.create_bind_group(&BindGroupDescriptor { - entries: &[ - BindGroupEntry { - binding: 0, - resource: view_binding.clone(), - }, - BindGroupEntry { - binding: 1, - resource: globals.clone(), - }, - ], - label: Some("mesh2d_view_bind_group"), - layout: &mesh2d_pipeline.view_layout, - }); + let view_bind_group = render_device.create_bind_group( + "mesh2d_view_bind_group", + &mesh2d_pipeline.view_layout, + &BindGroupEntries::sequential((view_binding.clone(), globals.clone())), + ); commands.entity(entity).insert(Mesh2dViewBindGroup { value: view_bind_group, diff --git a/crates/bevy_sprite/src/render/mod.rs b/crates/bevy_sprite/src/render/mod.rs index c70d33286ccf8..bebdbad231393 100644 --- a/crates/bevy_sprite/src/render/mod.rs +++ b/crates/bevy_sprite/src/render/mod.rs @@ -21,7 +21,7 @@ use bevy_render::{ DrawFunctions, PhaseItem, RenderCommand, RenderCommandResult, RenderPhase, SetItemPipeline, TrackedRenderPass, }, - render_resource::*, + render_resource::{BindGroupEntries, *}, renderer::{RenderDevice, RenderQueue}, texture::{ BevyDefault, DefaultImageSampler, GpuImage, Image, ImageSampler, TextureFormatPixelInfo, @@ -623,14 +623,11 @@ pub fn prepare_sprites( // Clear the sprite instances sprite_meta.sprite_instance_buffer.clear(); - sprite_meta.view_bind_group = Some(render_device.create_bind_group(&BindGroupDescriptor { - entries: &[BindGroupEntry { - binding: 0, - resource: view_binding, - }], - label: Some("sprite_view_bind_group"), - layout: &sprite_pipeline.view_layout, - })); + sprite_meta.view_bind_group = Some(render_device.create_bind_group( + "sprite_view_bind_group", + &sprite_pipeline.view_layout, + &BindGroupEntries::single(view_binding), + )); // Index buffer indices let mut index = 0; @@ -667,22 +664,14 @@ pub fn prepare_sprites( .values .entry(batch_image_handle) .or_insert_with(|| { - render_device.create_bind_group(&BindGroupDescriptor { - entries: &[ - BindGroupEntry { - binding: 0, - resource: BindingResource::TextureView( - &gpu_image.texture_view, - ), - }, - BindGroupEntry { - binding: 1, - resource: BindingResource::Sampler(&gpu_image.sampler), - }, - ], - label: Some("sprite_material_bind_group"), - layout: &sprite_pipeline.material_layout, - }) + render_device.create_bind_group( + "sprite_material_bind_group", + &sprite_pipeline.material_layout, + &BindGroupEntries::sequential(( + &gpu_image.texture_view, + &gpu_image.sampler, + )), + ) }); } diff --git a/crates/bevy_ui/src/render/mod.rs b/crates/bevy_ui/src/render/mod.rs index c852122e6ac76..a7a52c628319e 100644 --- a/crates/bevy_ui/src/render/mod.rs +++ b/crates/bevy_ui/src/render/mod.rs @@ -5,7 +5,7 @@ use bevy_core_pipeline::{core_2d::Camera2d, core_3d::Camera3d}; use bevy_hierarchy::Parent; use bevy_render::render_phase::PhaseItem; use bevy_render::view::ViewVisibility; -use bevy_render::{ExtractSchedule, 
Render}; +use bevy_render::{render_resource::BindGroupEntries, ExtractSchedule, Render}; use bevy_window::{PrimaryWindow, Window}; pub use pipeline::*; pub use render_pass::*; @@ -812,14 +812,11 @@ pub fn prepare_uinodes( let mut batches: Vec<(Entity, UiBatch)> = Vec::with_capacity(*previous_len); ui_meta.vertices.clear(); - ui_meta.view_bind_group = Some(render_device.create_bind_group(&BindGroupDescriptor { - entries: &[BindGroupEntry { - binding: 0, - resource: view_binding, - }], - label: Some("ui_view_bind_group"), - layout: &ui_pipeline.view_layout, - })); + ui_meta.view_bind_group = Some(render_device.create_bind_group( + "ui_view_bind_group", + &ui_pipeline.view_layout, + &BindGroupEntries::single(view_binding), + )); // Vertex buffer index let mut index = 0; @@ -851,24 +848,14 @@ pub fn prepare_uinodes( .values .entry(batch_image_handle) .or_insert_with(|| { - render_device.create_bind_group(&BindGroupDescriptor { - entries: &[ - BindGroupEntry { - binding: 0, - resource: BindingResource::TextureView( - &gpu_image.texture_view, - ), - }, - BindGroupEntry { - binding: 1, - resource: BindingResource::Sampler( - &gpu_image.sampler, - ), - }, - ], - label: Some("ui_material_bind_group"), - layout: &ui_pipeline.image_layout, - }) + render_device.create_bind_group( + "ui_material_bind_group", + &ui_pipeline.image_layout, + &BindGroupEntries::sequential(( + &gpu_image.texture_view, + &gpu_image.sampler, + )), + ) }); existing_batch = batches.last_mut(); diff --git a/crates/bevy_utils/macros/src/lib.rs b/crates/bevy_utils/macros/src/lib.rs index 26fb9839919bd..4189b432ac605 100644 --- a/crates/bevy_utils/macros/src/lib.rs +++ b/crates/bevy_utils/macros/src/lib.rs @@ -136,3 +136,38 @@ pub fn all_tuples(input: TokenStream) -> TokenStream { )* }) } + +#[proc_macro] +pub fn all_tuples_with_size(input: TokenStream) -> TokenStream { + let input = parse_macro_input!(input as AllTuples); + let len = 1 + input.end - input.start; + let mut ident_tuples = Vec::with_capacity(len); + for i in 0..=len { + let idents = input + .idents + .iter() + .map(|ident| format_ident!("{}{}", ident, i)); + if input.idents.len() < 2 { + ident_tuples.push(quote! { + #(#idents)* + }); + } else { + ident_tuples.push(quote! { + (#(#idents),*) + }); + } + } + + let macro_ident = &input.macro_ident; + let invocations = (input.start..=input.end).map(|i| { + let ident_tuples = &ident_tuples[..i]; + quote! { + #macro_ident!(#i, #(#ident_tuples),*); + } + }); + TokenStream::from(quote! 
{ + #( + #invocations + )* + }) +} diff --git a/examples/shader/compute_shader_game_of_life.rs b/examples/shader/compute_shader_game_of_life.rs index 10c368771474b..2f8a269e592b8 100644 --- a/examples/shader/compute_shader_game_of_life.rs +++ b/examples/shader/compute_shader_game_of_life.rs @@ -107,14 +107,11 @@ fn prepare_bind_group( render_device: Res, ) { let view = gpu_images.get(&game_of_life_image.0).unwrap(); - let bind_group = render_device.create_bind_group(&BindGroupDescriptor { - label: None, - layout: &pipeline.texture_bind_group_layout, - entries: &[BindGroupEntry { - binding: 0, - resource: BindingResource::TextureView(&view.texture_view), - }], - }); + let bind_group = render_device.create_bind_group( + None, + &pipeline.texture_bind_group_layout, + &BindGroupEntries::single(&view.texture_view), + ); commands.insert_resource(GameOfLifeImageBindGroup(bind_group)); } diff --git a/examples/shader/post_processing.rs b/examples/shader/post_processing.rs index 94d3096e6a7c4..66d26cffbc271 100644 --- a/examples/shader/post_processing.rs +++ b/examples/shader/post_processing.rs @@ -20,12 +20,12 @@ use bevy::{ NodeRunError, RenderGraphApp, RenderGraphContext, ViewNode, ViewNodeRunner, }, render_resource::{ - BindGroupDescriptor, BindGroupEntry, BindGroupLayout, BindGroupLayoutDescriptor, - BindGroupLayoutEntry, BindingResource, BindingType, CachedRenderPipelineId, - ColorTargetState, ColorWrites, FragmentState, MultisampleState, Operations, - PipelineCache, PrimitiveState, RenderPassColorAttachment, RenderPassDescriptor, - RenderPipelineDescriptor, Sampler, SamplerBindingType, SamplerDescriptor, ShaderStages, - ShaderType, TextureFormat, TextureSampleType, TextureViewDimension, + BindGroupEntries, BindGroupLayout, BindGroupLayoutDescriptor, BindGroupLayoutEntry, + BindingType, CachedRenderPipelineId, ColorTargetState, ColorWrites, FragmentState, + MultisampleState, Operations, PipelineCache, PrimitiveState, RenderPassColorAttachment, + RenderPassDescriptor, RenderPipelineDescriptor, Sampler, SamplerBindingType, + SamplerDescriptor, ShaderStages, ShaderType, TextureFormat, TextureSampleType, + TextureViewDimension, }, renderer::{RenderContext, RenderDevice}, texture::BevyDefault, @@ -176,30 +176,19 @@ impl ViewNode for PostProcessNode { // The reason it doesn't work is because each post_process_write will alternate the source/destination. // The only way to have the correct source/destination for the bind_group // is to make sure you get it during the node execution. 
- let bind_group = render_context - .render_device() - .create_bind_group(&BindGroupDescriptor { - label: Some("post_process_bind_group"), - layout: &post_process_pipeline.layout, - // It's important for this to match the BindGroupLayout defined in the PostProcessPipeline - entries: &[ - BindGroupEntry { - binding: 0, - // Make sure to use the source view - resource: BindingResource::TextureView(post_process.source), - }, - BindGroupEntry { - binding: 1, - // Use the sampler created for the pipeline - resource: BindingResource::Sampler(&post_process_pipeline.sampler), - }, - BindGroupEntry { - binding: 2, - // Set the settings binding - resource: settings_binding.clone(), - }, - ], - }); + let bind_group = render_context.render_device().create_bind_group( + "post_process_bind_group", + &post_process_pipeline.layout, + // It's important for this to match the BindGroupLayout defined in the PostProcessPipeline + &BindGroupEntries::sequential(( + // Make sure to use the source view + post_process.source, + // Use the sampler created for the pipeline + &post_process_pipeline.sampler, + // Set the settings binding + settings_binding.clone(), + )), + ); // Begin the render pass let mut render_pass = render_context.begin_tracked_render_pass(RenderPassDescriptor { diff --git a/examples/shader/texture_binding_array.rs b/examples/shader/texture_binding_array.rs index bcce8ccac7a8e..82bcf34acc6ab 100644 --- a/examples/shader/texture_binding_array.rs +++ b/examples/shader/texture_binding_array.rs @@ -5,11 +5,8 @@ use bevy::{ prelude::*, reflect::TypePath, render::{ - render_asset::RenderAssets, - render_resource::{AsBindGroupError, PreparedBindGroup, *}, - renderer::RenderDevice, - texture::FallbackImage, - RenderApp, + render_asset::RenderAssets, render_resource::*, renderer::RenderDevice, + texture::FallbackImage, RenderApp, }, }; use std::{num::NonZeroU32, process::exit}; @@ -119,20 +116,11 @@ impl AsBindGroup for BindlessMaterial { textures[id] = &*image.texture_view; } - let bind_group = render_device.create_bind_group(&BindGroupDescriptor { - label: "bindless_material_bind_group".into(), + let bind_group = render_device.create_bind_group( + "bindless_material_bind_group", layout, - entries: &[ - BindGroupEntry { - binding: 0, - resource: BindingResource::TextureViewArray(&textures[..]), - }, - BindGroupEntry { - binding: 1, - resource: BindingResource::Sampler(&fallback_image.sampler), - }, - ], - }); + &BindGroupEntries::sequential((&textures[..], &fallback_image.sampler)), + ); Ok(PreparedBindGroup { bindings: vec![], From 38e0a8010e57b872b7ab33fdf38d347db183907c Mon Sep 17 00:00:00 2001 From: Rob Parrett Date: Sat, 21 Oct 2023 10:38:15 -0700 Subject: [PATCH 47/63] Tidy up UI node docs (#10189) # Objective While reviewing #10187 I noticed some other mistakes in the UI node docs. ## Solution I did a quick proofreading pass and fixed a few things. And of course, the typo from that other PR. ## Notes I occasionally insert a period to make a section of doc self-consistent but didn't go one way or the other on all periods in the file. 
--------- Co-authored-by: Noah --- crates/bevy_ui/src/ui_node.rs | 182 ++++++++++++++++++---------------- 1 file changed, 98 insertions(+), 84 deletions(-) diff --git a/crates/bevy_ui/src/ui_node.rs b/crates/bevy_ui/src/ui_node.rs index 3a87765fea840..81769141d4c17 100644 --- a/crates/bevy_ui/src/ui_node.rs +++ b/crates/bevy_ui/src/ui_node.rs @@ -14,29 +14,34 @@ use thiserror::Error; #[derive(Component, Debug, Copy, Clone, Reflect)] #[reflect(Component, Default)] pub struct Node { - /// The size of the node as width and height in logical pixels - /// automatically calculated by [`super::layout::ui_layout_system`] + /// The size of the node as width and height in logical pixels. + /// + /// Automatically calculated by [`super::layout::ui_layout_system`]. pub(crate) calculated_size: Vec2, - /// The width of this node's outline - /// If this value is `Auto`, negative or `0.` then no outline will be rendered - /// automatically calculated by [`super::layout::resolve_outlines_system`] + /// The width of this node's outline. + /// If this value is `Auto`, negative or `0.` then no outline will be rendered. + /// + /// Automatically calculated by [`super::layout::resolve_outlines_system`]. pub(crate) outline_width: f32, - // The amount of space between the outline and the edge of the node + /// The amount of space between the outline and the edge of the node. pub(crate) outline_offset: f32, - /// The unrounded size of the node as width and height in logical pixels - /// automatically calculated by [`super::layout::ui_layout_system`] + /// The unrounded size of the node as width and height in logical pixels. + /// + /// Automatically calculated by [`super::layout::ui_layout_system`]. pub(crate) unrounded_size: Vec2, } impl Node { - /// The calculated node size as width and height in logical pixels - /// automatically calculated by [`super::layout::ui_layout_system`] + /// The calculated node size as width and height in logical pixels. + /// + /// Automatically calculated by [`super::layout::ui_layout_system`]. pub const fn size(&self) -> Vec2 { self.calculated_size } - /// The calculated node size as width and height in logical pixels before rounding - /// automatically calculated by [`super::layout::ui_layout_system`] + /// The calculated node size as width and height in logical pixels before rounding. + /// + /// Automatically calculated by [`super::layout::ui_layout_system`]. pub const fn unrounded_size(&self) -> Vec2 { self.unrounded_size } @@ -102,7 +107,8 @@ impl Default for Node { /// Describes the style of a UI container node /// -/// Node's can be laid out using either Flexbox or CSS Grid Layout.
+/// Nodes can be laid out using either Flexbox or CSS Grid Layout. +/// /// See below for general learning resources and for documentation on the individual style properties. /// /// ### Flexbox @@ -128,7 +134,7 @@ pub struct Style { /// pub display: Display, - /// Whether a node should be laid out in-flow with, or independently of it's siblings: + /// Whether a node should be laid out in-flow with, or independently of its siblings: /// - [`PositionType::Relative`]: Layout this node in-flow with other nodes using the usual (flexbox/grid) layout algorithm. /// - [`PositionType::Absolute`]: Layout this node on top and independently of other nodes. /// @@ -140,9 +146,10 @@ pub struct Style { /// pub overflow: Overflow, - /// Defines the text direction. For example English is written LTR (left-to-right) while Arabic is written RTL (right-to-left). + /// Defines the text direction. For example, English is written LTR (left-to-right) while Arabic is written RTL (right-to-left). + /// + /// Note: the corresponding CSS property also affects box layout order, but this isn't yet implemented in Bevy. /// - /// Note: the corresponding CSS property also affects box layout order, but this isn't yet implemented in bevy. /// pub direction: Direction, @@ -184,12 +191,12 @@ pub struct Style { /// pub height: Val, - /// The minimum width of the node. `min_width` is used if it is greater than either `width` and/or `max_width`. + /// The minimum width of the node. `min_width` is used if it is greater than `width` and/or `max_width`. /// /// pub min_width: Val, - /// The minimum height of the node. `min_height` is used if it is greater than either `height` and/or `max_height`. + /// The minimum height of the node. `min_height` is used if it is greater than `height` and/or `max_height`. /// /// pub min_height: Val, @@ -226,7 +233,7 @@ pub struct Style { pub justify_items: JustifyItems, /// - For Flexbox items, controls cross-axis alignment of the item. - /// - For CSS Grid items, controls block (vertical) axis alignment of a grid item within it's grid area. + /// - For CSS Grid items, controls block (vertical) axis alignment of a grid item within its grid area. /// /// If set to `Auto`, alignment is inherited from the value of [`AlignItems`] set on the parent node. /// @@ -234,11 +241,11 @@ pub struct Style { pub align_self: AlignSelf, /// - For Flexbox items, this property has no effect. See `justify_content` for main-axis alignment of flex items. - /// - For CSS Grid items, controls inline (horizontal) axis alignment of a grid item within it's grid area. + /// - For CSS Grid items, controls inline (horizontal) axis alignment of a grid item within its grid area. /// /// If set to `Auto`, alignment is inherited from the value of [`JustifyItems`] set on the parent node. /// - /// + /// pub justify_self: JustifySelf, /// - For Flexbox containers, controls alignment of lines if flex_wrap is set to [`FlexWrap::Wrap`] and there are multiple lines of items. @@ -270,7 +277,7 @@ pub struct Style { /// ..Default::default() /// }; /// ``` - /// A node with this style and a parent with dimensions of 100px by 300px, will have calculated margins of 10px on both left and right edges, and 15px on both top and bottom edges. + /// A node with this style and a parent with dimensions of 100px by 300px will have calculated margins of 10px on both left and right edges, and 15px on both top and bottom edges. 
/// /// pub margin: UiRect, @@ -292,7 +299,7 @@ pub struct Style { /// ..Default::default() /// }; /// ``` - /// A node with this style and a parent with dimensions of 300px by 100px, will have calculated padding of 3px on the left, 6px on the right, 9px on the top and 12px on the bottom. + /// A node with this style and a parent with dimensions of 300px by 100px will have calculated padding of 3px on the left, 6px on the right, 9px on the top and 12px on the bottom. /// /// pub padding: UiRect, @@ -306,12 +313,12 @@ pub struct Style { /// pub border: UiRect, - /// Whether a Flexbox container should be a row or a column. This property has no effect of Grid nodes. + /// Whether a Flexbox container should be a row or a column. This property has no effect on Grid nodes. /// /// pub flex_direction: FlexDirection, - /// Whether a Flexbox container should wrap it's contents onto multiple line wrap if they overflow. This property has no effect of Grid nodes. + /// Whether a Flexbox container should wrap its contents onto multiple lines if they overflow. This property has no effect on Grid nodes. /// /// pub flex_wrap: FlexWrap, @@ -328,27 +335,27 @@ pub struct Style { /// The initial length of a flexbox in the main axis, before flex growing/shrinking properties are applied. /// - /// `flex_basis` overrides `size` on the main axis if both are set, but it obeys the bounds defined by `min_size` and `max_size`. + /// `flex_basis` overrides `size` on the main axis if both are set, but it obeys the bounds defined by `min_size` and `max_size`. /// /// pub flex_basis: Val, - /// The size of the gutters between items in a vertical flexbox layout or between rows in a grid layout + /// The size of the gutters between items in a vertical flexbox layout or between rows in a grid layout. /// /// Note: Values of `Val::Auto` are not valid and are treated as zero. /// /// pub row_gap: Val, - /// The size of the gutters between items in a horizontal flexbox layout or between column in a grid layout + /// The size of the gutters between items in a horizontal flexbox layout or between column in a grid layout. /// /// Note: Values of `Val::Auto` are not valid and are treated as zero. /// /// pub column_gap: Val, - /// Controls whether automatically placed grid items are placed row-wise or column-wise. And whether the sparse or dense packing algorithm is used. - /// Only affect Grid layouts + /// Controls whether automatically placed grid items are placed row-wise or column-wise as well as whether the sparse or dense packing algorithm is used. + /// Only affects Grid layouts. /// /// pub grid_auto_flow: GridAutoFlow, @@ -373,7 +380,7 @@ pub struct Style { /// Defines the size of implicitly created columns. Columns are created implicitly when grid items are given explicit placements that are out of bounds /// of the columns explicitly created using `grid_template_columns`. /// - /// + /// pub grid_auto_columns: Vec, /// The row in which a grid item starts and how many rows it spans. @@ -440,7 +447,7 @@ impl Default for Style { #[derive(Copy, Clone, PartialEq, Eq, Debug, Serialize, Deserialize, Reflect)] #[reflect(PartialEq, Serialize, Deserialize)] pub enum AlignItems { - /// The items are packed in their default position as if no alignment was applied + /// The items are packed in their default position as if no alignment was applied. Default, /// Items are packed towards the start of the axis. 
Start, @@ -474,7 +481,7 @@ impl Default for AlignItems { #[derive(Copy, Clone, PartialEq, Eq, Debug, Serialize, Deserialize, Reflect)] #[reflect(PartialEq, Serialize, Deserialize)] pub enum JustifyItems { - /// The items are packed in their default position as if no alignment was applied + /// The items are packed in their default position as if no alignment was applied. Default, /// Items are packed towards the start of the axis. Start, @@ -568,7 +575,7 @@ impl Default for JustifySelf { #[derive(Copy, Clone, PartialEq, Eq, Debug, Serialize, Deserialize, Reflect)] #[reflect(PartialEq, Serialize, Deserialize)] pub enum AlignContent { - /// The items are packed in their default position as if no alignment was applied + /// The items are packed in their default position as if no alignment was applied. Default, /// Each line moves towards the start of the cross axis. Start, @@ -582,13 +589,13 @@ pub enum AlignContent { Center, /// Each line will stretch to fill the remaining space. Stretch, - /// Each line fills the space it needs, putting the remaining space, if any - /// inbetween the lines. + /// Each line fills the space it needs, putting the remaining space, if any, + /// between the lines. SpaceBetween, - /// The gap between the first and last items is exactly THE SAME as the gap between items. + /// The gap between the first and last items is exactly the same as the gap between items. /// The gaps are distributed evenly. SpaceEvenly, - /// Each line fills the space it needs, putting the remaining space, if any + /// Each line fills the space it needs, putting the remaining space, if any, /// around the lines. SpaceAround, } @@ -607,7 +614,7 @@ impl Default for AlignContent { #[derive(Copy, Clone, PartialEq, Eq, Debug, Serialize, Deserialize, Reflect)] #[reflect(PartialEq, Serialize, Deserialize)] pub enum JustifyContent { - /// The items are packed in their default position as if no alignment was applied + /// The items are packed in their default position as if no alignment was applied. Default, /// Items are packed toward the start of the axis. Start, @@ -637,9 +644,9 @@ impl Default for JustifyContent { } } -/// Defines the text direction +/// Defines the text direction. /// -/// For example English is written LTR (left-to-right) while Arabic is written RTL (right-to-left). +/// For example, English is written LTR (left-to-right) while Arabic is written RTL (right-to-left). #[derive(Copy, Clone, PartialEq, Eq, Debug, Serialize, Deserialize, Reflect)] #[reflect(PartialEq, Serialize, Deserialize)] pub enum Direction { @@ -661,7 +668,7 @@ impl Default for Direction { } } -/// Whether to use a Flexbox layout model. +/// Defines the layout model used by this node. /// /// Part of the [`Style`] component. #[derive(Copy, Clone, PartialEq, Eq, Debug, Serialize, Deserialize, Reflect)] @@ -803,9 +810,7 @@ impl Default for OverflowAxis { pub enum PositionType { /// Relative to all other nodes with the [`PositionType::Relative`] value. Relative, - /// Independent of all other nodes. - /// - /// As usual, the `Style.position` field of this node is specified relative to its parent node. + /// Independent of all other nodes, but relative to its parent node. Absolute, } @@ -841,17 +846,18 @@ impl Default for FlexWrap { } } -/// Controls whether grid items are placed row-wise or column-wise. And whether the sparse or dense packing algorithm is used. +/// Controls whether grid items are placed row-wise or column-wise as well as whether the sparse or dense packing algorithm is used. 
/// -/// The "dense" packing algorithm attempts to fill in holes earlier in the grid, if smaller items come up later. This may cause items to appear out-of-order, when doing so would fill in holes left by larger items. +/// The "dense" packing algorithm attempts to fill in holes earlier in the grid, if smaller items come up later. +/// This may cause items to appear out-of-order when doing so would fill in holes left by larger items. /// -/// Defaults to [`GridAutoFlow::Row`] +/// Defaults to [`GridAutoFlow::Row`]. /// /// #[derive(Copy, Clone, PartialEq, Eq, Debug, Serialize, Deserialize, Reflect)] #[reflect(PartialEq, Serialize, Deserialize)] pub enum GridAutoFlow { - /// Items are placed by filling each row in turn, adding new rows as necessary + /// Items are placed by filling each row in turn, adding new rows as necessary. Row, /// Items are placed by filling each column in turn, adding new columns as necessary. Column, @@ -904,7 +910,8 @@ pub enum MaxTrackSizingFunction { /// Track maximum size should be automatically sized Auto, /// The dimension as a fraction of the total available grid space (`fr` units in CSS) - /// Specified value is the numerator of the fraction. Denominator is the sum of all fractions specified in that grid dimension + /// Specified value is the numerator of the fraction. Denominator is the sum of all fractions specified in that grid dimension. + /// /// Spec: Fraction(f32), } @@ -944,7 +951,7 @@ impl GridTrack { /// Create a grid track with an `fr` size. /// Note that this will give the track a content-based minimum size. - /// Usually you are best off using `GridTrack::flex` instead which uses a zero minimum size + /// Usually you are best off using `GridTrack::flex` instead which uses a zero minimum size. pub fn fr>(value: f32) -> T { Self { min_sizing_function: MinTrackSizingFunction::Auto, @@ -953,7 +960,7 @@ impl GridTrack { .into() } - /// Create a grid track with an `minmax(0, Nfr)` size. + /// Create a grid track with a `minmax(0, Nfr)` size. pub fn flex>(value: f32) -> T { Self { min_sizing_function: MinTrackSizingFunction::Px(0.0), @@ -962,7 +969,7 @@ impl GridTrack { .into() } - /// Create a grid track which is automatically sized to fit it's contents, and then + /// Create a grid track which is automatically sized to fit its contents. pub fn auto>() -> T { Self { min_sizing_function: MinTrackSizingFunction::Auto, @@ -971,7 +978,7 @@ impl GridTrack { .into() } - /// Create a grid track which is automatically sized to fit it's contents when sized at their "min-content" sizes + /// Create a grid track which is automatically sized to fit its contents when sized at their "min-content" sizes pub fn min_content>() -> T { Self { min_sizing_function: MinTrackSizingFunction::MinContent, @@ -980,7 +987,7 @@ impl GridTrack { .into() } - /// Create a grid track which is automatically sized to fit it's contents when sized at their "max-content" sizes + /// Create a grid track which is automatically sized to fit its contents when sized at their "max-content" sizes pub fn max_content>() -> T { Self { min_sizing_function: MinTrackSizingFunction::MaxContent, @@ -1070,14 +1077,14 @@ impl From for GridTrackRepetition { /// /// The repetition parameter can either be: /// - The integer `1`, in which case the track is non-repeated. -/// - a `u16` count to repeat the track N times -/// - A `GridTrackRepetition::AutoFit` or `GridTrackRepetition::AutoFill` +/// - a `u16` count to repeat the track N times. 
+/// - A `GridTrackRepetition::AutoFit` or `GridTrackRepetition::AutoFill`. /// /// Note: that in the common case you want a non-repeating track (repetition count 1), you may use the constructor methods on [`GridTrack`] /// to create a `RepeatedGridTrack`. i.e. `GridTrack::px(10.0)` is equivalent to `RepeatedGridTrack::px(1, 10.0)`. /// /// You may only use one auto-repetition per track list. And if your track list contains an auto repetition -/// then all track (in and outside of the repetition) must be fixed size (px or percent). Integer repetitions are just shorthand for writing out +/// then all tracks (in and outside of the repetition) must be fixed size (px or percent). Integer repetitions are just shorthand for writing out /// N tracks longhand and are not subject to the same limitations. #[derive(Clone, PartialEq, Debug, Serialize, Deserialize, Reflect)] #[reflect(PartialEq, Serialize, Deserialize)] @@ -1116,7 +1123,7 @@ impl RepeatedGridTrack { /// Create a repeating set of grid tracks with an `fr` size. /// Note that this will give the track a content-based minimum size. - /// Usually you are best off using `GridTrack::flex` instead which uses a zero minimum size + /// Usually you are best off using `GridTrack::flex` instead which uses a zero minimum size. pub fn fr>(repetition: u16, value: f32) -> T { Self { repetition: GridTrackRepetition::Count(repetition), @@ -1125,7 +1132,7 @@ impl RepeatedGridTrack { .into() } - /// Create a repeating set of grid tracks with an `minmax(0, Nfr)` size. + /// Create a repeating set of grid tracks with a `minmax(0, Nfr)` size. pub fn flex>(repetition: u16, value: f32) -> T { Self { repetition: GridTrackRepetition::Count(repetition), @@ -1245,11 +1252,18 @@ impl From for Vec { /// /// pub struct GridPlacement { - /// The grid line at which the item should start. Lines are 1-indexed. Negative indexes count backwards from the end of the grid. Zero is not a valid index. + /// The grid line at which the item should start. + /// Lines are 1-indexed. + /// Negative indexes count backwards from the end of the grid. + /// Zero is not a valid index. pub(crate) start: Option, - /// How many grid tracks the item should span. Defaults to 1. + /// How many grid tracks the item should span. + /// Defaults to 1. pub(crate) span: Option, - /// The grid line at which the item should end. Lines are 1-indexed. Negative indexes count backwards from the end of the grid. Zero is not a valid index. + /// The grid line at which the item should end. + /// Lines are 1-indexed. + /// Negative indexes count backwards from the end of the grid. + /// Zero is not a valid index. pub(crate) end: Option, } @@ -1269,7 +1283,7 @@ impl GridPlacement { /// /// # Panics /// - /// Panics if `span` is `0` + /// Panics if `span` is `0`. pub fn span(span: u16) -> Self { Self { start: None, @@ -1282,7 +1296,7 @@ impl GridPlacement { /// /// # Panics /// - /// Panics if `start` is `0` + /// Panics if `start` is `0`. pub fn start(start: i16) -> Self { Self { start: try_into_grid_index(start).expect("Invalid start value of 0."), @@ -1294,7 +1308,7 @@ impl GridPlacement { /// /// # Panics /// - /// Panics if `end` is `0` + /// Panics if `end` is `0`. pub fn end(end: i16) -> Self { Self { end: try_into_grid_index(end).expect("Invalid end value of 0."), @@ -1306,7 +1320,7 @@ impl GridPlacement { /// /// # Panics /// - /// Panics if `start` or `span` is `0` + /// Panics if `start` or `span` is `0`. 
pub fn start_span(start: i16, span: u16) -> Self { Self { start: try_into_grid_index(start).expect("Invalid start value of 0."), @@ -1319,7 +1333,7 @@ impl GridPlacement { /// /// # Panics /// - /// Panics if `start` or `end` is `0` + /// Panics if `start` or `end` is `0`. pub fn start_end(start: i16, end: i16) -> Self { Self { start: try_into_grid_index(start).expect("Invalid start value of 0."), @@ -1332,7 +1346,7 @@ impl GridPlacement { /// /// # Panics /// - /// Panics if `end` or `span` is `0` + /// Panics if `end` or `span` is `0`. pub fn end_span(end: i16, span: u16) -> Self { Self { start: None, @@ -1345,7 +1359,7 @@ impl GridPlacement { /// /// # Panics /// - /// Panics if `start` is `0` + /// Panics if `start` is `0`. pub fn set_start(mut self, start: i16) -> Self { self.start = try_into_grid_index(start).expect("Invalid start value of 0."); self @@ -1355,7 +1369,7 @@ impl GridPlacement { /// /// # Panics /// - /// Panics if `end` is `0` + /// Panics if `end` is `0`. pub fn set_end(mut self, end: i16) -> Self { self.end = try_into_grid_index(end).expect("Invalid end value of 0."); self @@ -1365,7 +1379,7 @@ impl GridPlacement { /// /// # Panics /// - /// Panics if `span` is `0` + /// Panics if `span` is `0`. pub fn set_span(mut self, span: u16) -> Self { self.span = try_into_grid_span(span).expect("Invalid span value of 0."); self @@ -1476,7 +1490,7 @@ impl Default for BorderColor { #[derive(Component, Copy, Clone, Default, Debug, Deserialize, Serialize, Reflect)] #[reflect(Component, Default, Deserialize, Serialize)] /// The [`Outline`] component adds an outline outside the edge of a UI node. -/// Outlines do not take up space in the layout +/// Outlines do not take up space in the layout. /// /// To add an [`Outline`] to a ui node you can spawn a `(NodeBundle, Outline)` tuple bundle: /// ``` @@ -1511,7 +1525,7 @@ impl Default for BorderColor { /// for (entity, interaction, mut maybe_outline) in node_query.iter_mut() { /// let outline_color = /// if matches!(*interaction, Interaction::Hovered) { -/// Color::WHITE +/// Color::WHITE /// } else { /// Color::NONE /// }; @@ -1528,16 +1542,16 @@ impl Default for BorderColor { pub struct Outline { /// The width of the outline. /// - /// Percentage `Val` values are resolved based on the width of the outlined [`Node`] + /// Percentage `Val` values are resolved based on the width of the outlined [`Node`]. pub width: Val, - /// The amount of space between a node's outline the edge of the node + /// The amount of space between a node's outline the edge of the node. /// - /// Percentage `Val` values are resolved based on the width of the outlined [`Node`] + /// Percentage `Val` values are resolved based on the width of the outlined [`Node`]. pub offset: Val, - /// Color of the outline + /// The color of the outline. /// /// If you are frequently toggling outlines for a UI node on and off it is recommended to set `Color::None` to hide the outline. - /// This avoids the table moves that would occcur from the repeated insertion and removal of the `Outline` component. + /// This avoids the table moves that would occur from the repeated insertion and removal of the `Outline` component. 
pub color: Color, } @@ -1572,14 +1586,14 @@ impl UiImage { } } - /// flip the image along its x-axis + /// Flip the image along its x-axis #[must_use] pub const fn with_flip_x(mut self) -> Self { self.flip_x = true; self } - /// flip the image along its y-axis + /// Flip the image along its y-axis #[must_use] pub const fn with_flip_y(mut self) -> Self { self.flip_y = true; @@ -1607,13 +1621,13 @@ pub struct CalculatedClip { /// /// UI nodes that have the same z-index will appear according to the order in which they /// appear in the UI hierarchy. In such a case, the last node to be added to its parent -/// will appear in front of this parent's other children. +/// will appear in front of its siblings. /// /// Internally, nodes with a global z-index share the stacking context of root UI nodes /// (nodes that have no parent). Because of this, there is no difference between using -/// [`ZIndex::Local(n)`] and [`ZIndex::Global(n)`] for root nodes. +/// `ZIndex::Local(n)` and `ZIndex::Global(n)` for root nodes. /// -/// Nodes without this component will be treated as if they had a value of [`ZIndex::Local(0)`]. +/// Nodes without this component will be treated as if they had a value of `ZIndex::Local(0)`. #[derive(Component, Copy, Clone, Debug, Reflect)] #[reflect(Component)] pub enum ZIndex { From 0716922165183968bd57fef93f559559cc9a976f Mon Sep 17 00:00:00 2001 From: Joseph <21144246+JoJoJet@users.noreply.github.com> Date: Sat, 21 Oct 2023 11:07:52 -0700 Subject: [PATCH 48/63] `ParamSet`s containing non-send parameters should also be non-send (#10211) # Objective Fix #10207 ## Solution Mark a `ParamSet`'s `SystemMeta` as non-send if any of its component parameters are non-send. --- crates/bevy_ecs/macros/src/lib.rs | 4 +++ crates/bevy_ecs/src/system/system_param.rs | 30 ++++++++++++++++++++++ 2 files changed, 34 insertions(+) diff --git a/crates/bevy_ecs/macros/src/lib.rs b/crates/bevy_ecs/macros/src/lib.rs index 28d63c6911b14..067191595405c 100644 --- a/crates/bevy_ecs/macros/src/lib.rs +++ b/crates/bevy_ecs/macros/src/lib.rs @@ -201,6 +201,10 @@ pub fn impl_param_set(_input: TokenStream) -> TokenStream { #param::init_state(world, &mut #meta); let #param = #param::init_state(world, &mut system_meta.clone()); )* + // Make the ParamSet non-send if any of its parameters are non-send. + if false #(|| !#meta.is_send())* { + system_meta.set_non_send(); + } #( system_meta .component_access_set diff --git a/crates/bevy_ecs/src/system/system_param.rs b/crates/bevy_ecs/src/system/system_param.rs index e5343002df95a..2b5d73d1ccfe0 100644 --- a/crates/bevy_ecs/src/system/system_param.rs +++ b/crates/bevy_ecs/src/system/system_param.rs @@ -1739,4 +1739,34 @@ mod tests { schedule.add_systems(non_sync_system); schedule.run(&mut world); } + + // Regression test for https://github.com/bevyengine/bevy/issues/10207. + #[test] + fn param_set_non_send_first() { + fn non_send_param_set(mut p: ParamSet<(NonSend<*mut u8>, ())>) { + let _ = p.p0(); + p.p1(); + } + + let mut world = World::new(); + world.insert_non_send_resource(std::ptr::null_mut::()); + let mut schedule = crate::schedule::Schedule::default(); + schedule.add_systems((non_send_param_set, non_send_param_set, non_send_param_set)); + schedule.run(&mut world); + } + + // Regression test for https://github.com/bevyengine/bevy/issues/10207. 
+ #[test] + fn param_set_non_send_second() { + fn non_send_param_set(mut p: ParamSet<((), NonSendMut<*mut u8>)>) { + p.p0(); + let _ = p.p1(); + } + + let mut world = World::new(); + world.insert_non_send_resource(std::ptr::null_mut::()); + let mut schedule = crate::schedule::Schedule::default(); + schedule.add_systems((non_send_param_set, non_send_param_set, non_send_param_set)); + schedule.run(&mut world); + } } From 9cfada3f22ad99ff378b8128553ee347864ca038 Mon Sep 17 00:00:00 2001 From: Kanabenki Date: Sat, 21 Oct 2023 21:10:37 +0200 Subject: [PATCH 49/63] Detect cubemap for dds textures (#10222) # Objective - Closes #10049. - Detect DDS texture containing a cubemap or a cubemap array. ## Solution - When loading a dds texture, the header capabilities are checked for the cubemap flag. An error is returned if not all faces are provided. --- ## Changelog ### Added - Added a new texture error `TextureError::IncompleteCubemap`, used for dds cubemap textures containing less than 6 faces, as that is not supported on modern graphics APIs. ### Fixed - DDS cubemaps are now loaded as cubemaps instead of 2D textures. ## Migration Guide If you are matching on a `TextureError`, you will need to add a new branch to handle `TextureError::IncompleteCubemap`. --- crates/bevy_render/src/texture/dds.rs | 42 ++++++++++++++++++++----- crates/bevy_render/src/texture/image.rs | 3 ++ 2 files changed, 38 insertions(+), 7 deletions(-) diff --git a/crates/bevy_render/src/texture/dds.rs b/crates/bevy_render/src/texture/dds.rs index 6f773193ab9a0..16f1aa7240f97 100644 --- a/crates/bevy_render/src/texture/dds.rs +++ b/crates/bevy_render/src/texture/dds.rs @@ -1,6 +1,8 @@ -use ddsfile::{D3DFormat, Dds, DxgiFormat}; +use ddsfile::{Caps2, D3DFormat, Dds, DxgiFormat}; use std::io::Cursor; -use wgpu::{Extent3d, TextureDimension, TextureFormat}; +use wgpu::{ + Extent3d, TextureDimension, TextureFormat, TextureViewDescriptor, TextureViewDimension, +}; use super::{CompressedImageFormats, Image, TextureError}; @@ -18,14 +20,29 @@ pub fn dds_buffer_to_image( ))); } let mut image = Image::default(); + let is_cubemap = dds.header.caps2.contains(Caps2::CUBEMAP); + let mut depth_or_array_layers = if dds.get_num_array_layers() > 1 { + dds.get_num_array_layers() + } else { + dds.get_depth() + }; + if is_cubemap { + if !dds.header.caps2.contains( + Caps2::CUBEMAP_NEGATIVEX + | Caps2::CUBEMAP_NEGATIVEY + | Caps2::CUBEMAP_NEGATIVEZ + | Caps2::CUBEMAP_POSITIVEX + | Caps2::CUBEMAP_POSITIVEY + | Caps2::CUBEMAP_POSITIVEZ, + ) { + return Err(TextureError::IncompleteCubemap); + } + depth_or_array_layers *= 6; + } image.texture_descriptor.size = Extent3d { width: dds.get_width(), height: dds.get_height(), - depth_or_array_layers: if dds.get_num_array_layers() > 1 { - dds.get_num_array_layers() - } else { - dds.get_depth() - }, + depth_or_array_layers, } .physical_size(texture_format); image.texture_descriptor.mip_level_count = dds.get_num_mipmap_levels(); @@ -37,6 +54,17 @@ pub fn dds_buffer_to_image( } else { TextureDimension::D1 }; + if is_cubemap { + let dimension = if image.texture_descriptor.size.depth_or_array_layers > 6 { + TextureViewDimension::CubeArray + } else { + TextureViewDimension::Cube + }; + image.texture_view_descriptor = Some(TextureViewDescriptor { + dimension: Some(dimension), + ..Default::default() + }); + } image.data = dds.data; Ok(image) } diff --git a/crates/bevy_render/src/texture/image.rs b/crates/bevy_render/src/texture/image.rs index d14388d87504c..8adf57267a2fe 100644 --- 
a/crates/bevy_render/src/texture/image.rs +++ b/crates/bevy_render/src/texture/image.rs @@ -438,6 +438,9 @@ pub enum TextureError { TranscodeError(String), #[error("format requires transcoding: {0:?}")] FormatRequiresTranscodingError(TranscodeFormat), + /// Only cubemaps with six faces are supported. + #[error("only cubemaps with six faces are supported")] + IncompleteCubemap, } /// The type of a raw image buffer. From c3627248f5bdac9b00666b9170e6bfa575794631 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois?= Date: Sun, 22 Oct 2023 00:19:46 +0200 Subject: [PATCH 50/63] Fix alignment on ios simulator (#10178) # Objective - Fix #10165 - On iOS simulator on apple silicon Macs, shader validation is going through the host, but device limits are reported for the device. They sometimes differ, and cause the validation to crash on something that should work ``` -[MTLDebugRenderCommandEncoder validateCommonDrawErrors:]:5775: failed assertion `Draw Errors Validation Fragment Function(fragment_): the offset into the buffer _naga_oil_mod_MJSXM6K7OBRHEOR2NVSXG2C7OZUWK527MJUW4ZDJNZTXG_memberfog that is bound at buffer index 6 must be a multiple of 256 but was set to 448. ``` ## Solution - Add a custom flag when building for the simulator and override the buffer alignment --- .../src/render_resource/uniform_buffer.rs | 12 ++++++++++-- examples/mobile/.cargo/config.toml | 5 +++++ examples/mobile/build_rust_deps.sh | 2 +- 3 files changed, 16 insertions(+), 3 deletions(-) create mode 100644 examples/mobile/.cargo/config.toml diff --git a/crates/bevy_render/src/render_resource/uniform_buffer.rs b/crates/bevy_render/src/render_resource/uniform_buffer.rs index 95196568ff20a..7e1b86869c4e0 100644 --- a/crates/bevy_render/src/render_resource/uniform_buffer.rs +++ b/crates/bevy_render/src/render_resource/uniform_buffer.rs @@ -277,8 +277,16 @@ impl DynamicUniformBuffer { device: &RenderDevice, queue: &'a RenderQueue, ) -> Option> { - let alignment = - AlignmentValue::new(device.limits().min_uniform_buffer_offset_alignment as u64); + let alignment = if cfg!(ios_simulator) { + // On iOS simulator on silicon macs, metal validation check that the host OS alignment + // is respected, but the device reports the correct value for iOS, which is smaller. + // Use the larger value. + // See https://github.com/bevyengine/bevy/pull/10178 - remove if it's not needed anymore. + AlignmentValue::new(256) + } else { + AlignmentValue::new(device.limits().min_uniform_buffer_offset_alignment as u64) + }; + let mut capacity = self.buffer.as_deref().map(wgpu::Buffer::size).unwrap_or(0); let size = alignment .round_up(T::min_size().get()) diff --git a/examples/mobile/.cargo/config.toml b/examples/mobile/.cargo/config.toml new file mode 100644 index 0000000000000..19a634093de05 --- /dev/null +++ b/examples/mobile/.cargo/config.toml @@ -0,0 +1,5 @@ +# Flag to notify the compiler we're building for the iOS simulator from an Apple silicon mac +# This needs some workarounds for now +# See https://github.com/bevyengine/bevy/pull/10178 - remove if it's not needed anymore. 
+[target.aarch64-apple-ios-sim] +rustflags = ["--cfg=ios_simulator"] diff --git a/examples/mobile/build_rust_deps.sh b/examples/mobile/build_rust_deps.sh index 2914faad5c0d0..e232648a2be91 100755 --- a/examples/mobile/build_rust_deps.sh +++ b/examples/mobile/build_rust_deps.sh @@ -48,7 +48,7 @@ for arch in $ARCHS; do # Hardware iOS targets cargo rustc --crate-type staticlib --lib $RELFLAG --target aarch64-apple-ios else - # M1 iOS simulator -- currently in Nightly only and requires to build `libstd` + # M1 iOS simulator cargo rustc --crate-type staticlib --lib $RELFLAG --target aarch64-apple-ios-sim fi esac From 8efcbf3e4f1b645b6fefd979fa72e6e47f10dc6a Mon Sep 17 00:00:00 2001 From: st0rmbtw <61053971+st0rmbtw@users.noreply.github.com> Date: Sun, 22 Oct 2023 04:45:29 +0300 Subject: [PATCH 51/63] Add convenient methods for Image (#10221) # Objective To get the width or height of an image you do: ```rust self.texture_descriptor.size.{width, height} ``` that is quite verbose. This PR adds some convenient methods for Image to reduce verbosity. ## Changelog * Add a `width()` method for getting the width of an image. * Add a `height()` method for getting the height of an image. * Rename the `size()` method to `size_f32()`. * Add a `size()` method for getting the size of an image as u32. * Renamed the `aspect_2d()` method to `aspect_ratio()`. ## Migration Guide Replace calls to the `Image::size()` method with `size_f32()`. Replace calls to the `Image::aspect_2d()` method with `aspect_ratio()`. --- .../src/texture/compressed_image_saver.rs | 2 +- crates/bevy_render/src/texture/image.rs | 32 +++++++++++++------ crates/bevy_sprite/src/lib.rs | 2 +- examples/3d/tonemapping.rs | 2 +- 4 files changed, 25 insertions(+), 13 deletions(-) diff --git a/crates/bevy_render/src/texture/compressed_image_saver.rs b/crates/bevy_render/src/texture/compressed_image_saver.rs index 26ab7d22785ce..a557447db3d46 100644 --- a/crates/bevy_render/src/texture/compressed_image_saver.rs +++ b/crates/bevy_render/src/texture/compressed_image_saver.rs @@ -40,7 +40,7 @@ impl AssetSaver for CompressedImageSaver { let mut source_image = compressor_params.source_image_mut(0); let size = image.size(); - source_image.init(&image.data, size.x as u32, size.y as u32, 4); + source_image.init(&image.data, size.x, size.y, 4); let mut compressor = basis_universal::Compressor::new(4); // SAFETY: the CompressorParams are "valid" to the best of our knowledge. The basis-universal diff --git a/crates/bevy_render/src/texture/image.rs b/crates/bevy_render/src/texture/image.rs index 8adf57267a2fe..aac9759278e1b 100644 --- a/crates/bevy_render/src/texture/image.rs +++ b/crates/bevy_render/src/texture/image.rs @@ -14,7 +14,7 @@ use crate::{ use bevy_asset::Asset; use bevy_derive::{Deref, DerefMut}; use bevy_ecs::system::{lifetimeless::SRes, Resource, SystemParamItem}; -use bevy_math::Vec2; +use bevy_math::{UVec2, Vec2}; use bevy_reflect::Reflect; use serde::{Deserialize, Serialize}; use std::hash::Hash; @@ -255,16 +255,28 @@ impl Image { } /// Returns the aspect ratio (height/width) of a 2D image. - pub fn aspect_2d(&self) -> f32 { - self.texture_descriptor.size.height as f32 / self.texture_descriptor.size.width as f32 + pub fn aspect_ratio(&self) -> f32 { + self.height() as f32 / self.width() as f32 + } + + /// Returns the width of a 2D image. + pub fn width(&self) -> u32 { + self.texture_descriptor.size.width + } + + /// Returns the height of a 2D image. 
+ pub fn height(&self) -> u32 { + self.texture_descriptor.size.height + } + + /// Returns the size of a 2D image as f32. + pub fn size_f32(&self) -> Vec2 { + Vec2::new(self.width() as f32, self.height() as f32) } /// Returns the size of a 2D image. - pub fn size(&self) -> Vec2 { - Vec2::new( - self.texture_descriptor.size.width as f32, - self.texture_descriptor.size.height as f32, - ) + pub fn size(&self) -> UVec2 { + UVec2::new(self.width(), self.height()) } /// Resizes the image to the new size, by removing information or appending 0 to the `data`. @@ -636,12 +648,12 @@ mod test { ); assert_eq!( Vec2::new(size.width as f32, size.height as f32), - image.size() + image.size_f32() ); } #[test] fn image_default_size() { let image = Image::default(); - assert_eq!(Vec2::ONE, image.size()); + assert_eq!(Vec2::ONE, image.size_f32()); } } diff --git a/crates/bevy_sprite/src/lib.rs b/crates/bevy_sprite/src/lib.rs index 1eb3b1a5cc64c..9f68a9a3b981e 100644 --- a/crates/bevy_sprite/src/lib.rs +++ b/crates/bevy_sprite/src/lib.rs @@ -138,7 +138,7 @@ pub fn calculate_bounds_2d( for (entity, sprite, texture_handle) in &sprites_without_aabb { if let Some(size) = sprite .custom_size - .or_else(|| images.get(texture_handle).map(|image| image.size())) + .or_else(|| images.get(texture_handle).map(|image| image.size_f32())) { let aabb = Aabb { center: (-sprite.anchor.as_vec() * size).extend(0.0).into(), diff --git a/examples/3d/tonemapping.rs b/examples/3d/tonemapping.rs index 595f3b0b4ecac..a7eb2973392ec 100644 --- a/examples/3d/tonemapping.rs +++ b/examples/3d/tonemapping.rs @@ -336,7 +336,7 @@ fn update_image_viewer( if let Some(base_color_texture) = mat.base_color_texture.clone() { if image_changed_id == base_color_texture.id() { if let Some(image_changed) = images.get(image_changed_id) { - let size = image_changed.size().normalize_or_zero() * 1.4; + let size = image_changed.size_f32().normalize_or_zero() * 1.4; // Resize Mesh let quad = Mesh::from(shape::Quad::new(size)); meshes.insert(mesh_h, quad); From 60773e6787d177e97458f9fcf118985906762b2a Mon Sep 17 00:00:00 2001 From: Gino Valente <49806985+MrGVSV@users.noreply.github.com> Date: Sun, 22 Oct 2023 05:43:31 -0700 Subject: [PATCH 52/63] bevy_reflect: Fix ignored/skipped field order (#7575) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # Objective Fixes #5101 Alternative to #6511 ## Solution Corrected the behavior for ignored fields in `FromReflect`, which was previously using the incorrect field indexes. Similarly, fields marked with `#[reflect(skip_serializing)]` no longer break when using `FromReflect` after deserialization. This was done by modifying `SerializationData` to store a function pointer that can later be used to generate a default instance of the skipped field during deserialization. The function pointer points to a function generated by the derive macro using the behavior designated by `#[reflect(default)]` (or just `Default` if none provided). The entire output of the macro is now wrapped in an [unnamed constant](https://doc.rust-lang.org/stable/reference/items/constant-items.html#unnamed-constant) which keeps this behavior hygienic. #### Rationale The biggest downside to this approach is that it requires fields marked `#[reflect(skip_serializing)]` to provide the ability to create a default instance— either via a `Default` impl or by specifying a custom one. 
While this isn't great, I think it might be justified by the fact that we really need to create this value when using `FromReflect` on a deserialized object. And we need to do this _during_ deserialization because after that (at least for tuples and tuple structs) we lose information about which field is which: _"is the value at index 1 in this `DynamicTupleStruct` the actual value for index 1 or is it really the value for index 2 since index 1 is skippable...?"_ #### Alternatives An alternative would be to store `Option>` within `DynamicTuple` and `DynamicTupleStruct` instead of just `Box`. This would allow us to insert "empty"/"missing" fields during deserialization, thus saving the positional information of the skipped fields. However, this may require changing the API of `Tuple` and `TupleStruct` such that they can account for their dynamic counterparts returning `None` for a skipped field. In practice this would probably mean exposing the `Option`-ness of the dynamics onto implementors via methods like `Tuple::drain` or `TupleStruct::field`. Personally, I think requiring `Default` would be better than muddying up the API to account for these special cases. But I'm open to trying out this other approach if the community feels that it's better. --- ## Changelog ### Public Changes #### Fixed - The behaviors of `#[reflect(ignore)]` and `#[reflect(skip_serializing)]` are no longer dependent on field order #### Changed - Fields marked with `#[reflect(skip_serializing)]` now need to either implement `Default` or specify a custom default function using `#[reflect(default = "path::to::some_func")]` - Deserializing a type with fields marked `#[reflect(skip_serializing)]` will now include that field initialized to its specified default value - `SerializationData::new` now takes the new `SkippedField` struct along with the skipped field index - Renamed `SerializationData::is_ignored_field` to `SerializationData::is_field_skipped` #### Added - Added `SkippedField` struct - Added methods `SerializationData::generate_default` and `SerializationData::iter_skipped` ### Internal Changes #### Changed - Replaced `members_to_serialization_denylist` and `BitSet` with `SerializationDataDef` - The `Reflect` derive is more hygienic as it now outputs within an [unnamed constant](https://doc.rust-lang.org/stable/reference/items/constant-items.html#unnamed-constant) - `StructField::index` has been split up into `StructField::declaration_index` and `StructField::reflection_index` #### Removed - Removed `bitset` dependency ## Migration Guide * Fields marked `#[reflect(skip_serializing)]` now must implement `Default` or specify a custom default function with `#[reflect(default = "path::to::some_func")]` ```rust #[derive(Reflect)] struct MyStruct { #[reflect(skip_serializing)] #[reflect(default = "get_foo_default")] foo: Foo, // <- `Foo` does not impl `Default` so requires a custom function #[reflect(skip_serializing)] bar: Bar, // <- `Bar` impls `Default` } #[derive(Reflect)] struct Foo(i32); #[derive(Reflect, Default)] struct Bar(i32); fn get_foo_default() -> Foo { Foo(123) } ``` * `SerializationData::new` has been changed to expect an iterator of `(usize, SkippedField)` rather than one of just `usize` ```rust // BEFORE SerializationData::new([0, 3].into_iter()); // AFTER SerializationData::new([ (0, SkippedField::new(field_0_default_fn)), (3, SkippedField::new(field_3_default_fn)), ].into_iter()); ``` * `Serialization::is_ignored_field` has been renamed to `Serialization::is_field_skipped` * Fields marked 
`#[reflect(skip_serializing)]` are now included in deserialization output. This may affect logic that expected those fields to be absent. --- .../bevy_reflect_derive/Cargo.toml | 1 - .../bevy_reflect_derive/src/derive_data.rs | 62 ++-- .../bevy_reflect_derive/src/from_reflect.rs | 10 +- .../bevy_reflect_derive/src/impls/enums.rs | 6 +- .../bevy_reflect_derive/src/impls/structs.rs | 4 +- .../src/impls/tuple_structs.rs | 2 +- .../bevy_reflect_derive/src/lib.rs | 94 +++-- .../bevy_reflect_derive/src/registration.rs | 13 +- .../bevy_reflect_derive/src/serialization.rs | 91 +++++ .../bevy_reflect_derive/src/utility.rs | 38 -- crates/bevy_reflect/src/lib.rs | 33 ++ crates/bevy_reflect/src/serde/de.rs | 333 +++++++++--------- crates/bevy_reflect/src/serde/mod.rs | 67 +++- crates/bevy_reflect/src/serde/ser.rs | 4 +- crates/bevy_reflect/src/serde/type_data.rs | 132 +++++-- crates/bevy_reflect/src/tuple_struct.rs | 13 +- 16 files changed, 607 insertions(+), 296 deletions(-) create mode 100644 crates/bevy_reflect/bevy_reflect_derive/src/serialization.rs diff --git a/crates/bevy_reflect/bevy_reflect_derive/Cargo.toml b/crates/bevy_reflect/bevy_reflect_derive/Cargo.toml index 5073873b638c5..bfb239e8105c5 100644 --- a/crates/bevy_reflect/bevy_reflect_derive/Cargo.toml +++ b/crates/bevy_reflect/bevy_reflect_derive/Cargo.toml @@ -23,4 +23,3 @@ syn = { version = "2.0", features = ["full"] } proc-macro2 = "1.0" quote = "1.0" uuid = { version = "1.1", features = ["v4"] } -bit-set = "0.5.2" diff --git a/crates/bevy_reflect/bevy_reflect_derive/src/derive_data.rs b/crates/bevy_reflect/bevy_reflect_derive/src/derive_data.rs index 0e5ef21dcaf69..59b3e2dd08a30 100644 --- a/crates/bevy_reflect/bevy_reflect_derive/src/derive_data.rs +++ b/crates/bevy_reflect/bevy_reflect_derive/src/derive_data.rs @@ -1,11 +1,11 @@ use crate::container_attributes::{FromReflectAttrs, ReflectTraits}; use crate::field_attributes::{parse_field_attrs, ReflectFieldAttr}; use crate::type_path::parse_path_no_leading_colon; -use crate::utility::{members_to_serialization_denylist, StringExpr, WhereClauseOptions}; -use bit_set::BitSet; +use crate::utility::{StringExpr, WhereClauseOptions}; use quote::{quote, ToTokens}; use syn::token::Comma; +use crate::serialization::SerializationDataDef; use crate::{ utility, REFLECT_ATTRIBUTE_NAME, REFLECT_VALUE_ATTRIBUTE_NAME, TYPE_NAME_ATTRIBUTE_NAME, TYPE_PATH_ATTRIBUTE_NAME, @@ -65,7 +65,7 @@ pub(crate) struct ReflectMeta<'a> { /// ``` pub(crate) struct ReflectStruct<'a> { meta: ReflectMeta<'a>, - serialization_denylist: BitSet, + serialization_data: Option, fields: Vec>, } @@ -95,7 +95,14 @@ pub(crate) struct StructField<'a> { /// The reflection-based attributes on the field. pub attrs: ReflectFieldAttr, /// The index of this field within the struct. - pub index: usize, + pub declaration_index: usize, + /// The index of this field as seen by the reflection API. + /// + /// This index accounts for the removal of [ignored] fields. + /// It will only be `Some(index)` when the field is not ignored. 
+ /// + /// [ignored]: crate::field_attributes::ReflectIgnoreBehavior::IgnoreAlways + pub reflection_index: Option, /// The documentation for this field, if any #[cfg(feature = "documentation")] pub doc: crate::documentation::Documentation, @@ -272,9 +279,7 @@ impl<'a> ReflectDerive<'a> { let fields = Self::collect_struct_fields(&data.fields)?; let reflect_struct = ReflectStruct { meta, - serialization_denylist: members_to_serialization_denylist( - fields.iter().map(|v| v.attrs.ignore), - ), + serialization_data: SerializationDataDef::new(&fields)?, fields, }; @@ -308,19 +313,31 @@ impl<'a> ReflectDerive<'a> { } fn collect_struct_fields(fields: &'a Fields) -> Result>, syn::Error> { + let mut active_index = 0; let sifter: utility::ResultSifter> = fields .iter() .enumerate() - .map(|(index, field)| -> Result { - let attrs = parse_field_attrs(&field.attrs)?; - Ok(StructField { - index, - attrs, - data: field, - #[cfg(feature = "documentation")] - doc: crate::documentation::Documentation::from_attributes(&field.attrs), - }) - }) + .map( + |(declaration_index, field)| -> Result { + let attrs = parse_field_attrs(&field.attrs)?; + + let reflection_index = if attrs.ignore.is_ignored() { + None + } else { + active_index += 1; + Some(active_index - 1) + }; + + Ok(StructField { + declaration_index, + reflection_index, + attrs, + data: field, + #[cfg(feature = "documentation")] + doc: crate::documentation::Documentation::from_attributes(&field.attrs), + }) + }, + ) .fold( utility::ResultSifter::default(), utility::ResultSifter::fold, @@ -420,12 +437,9 @@ impl<'a> ReflectStruct<'a> { &self.meta } - /// Access the data about which fields should be ignored during serialization. - /// - /// The returned bitset is a collection of indices obtained from the [`members_to_serialization_denylist`] function. - #[allow(dead_code)] - pub fn serialization_denylist(&self) -> &BitSet { - &self.serialization_denylist + /// Returns the [`SerializationDataDef`] for this struct. + pub fn serialization_data(&self) -> Option<&SerializationDataDef> { + self.serialization_data.as_ref() } /// Returns the `GetTypeRegistration` impl as a `TokenStream`. @@ -438,7 +452,7 @@ impl<'a> ReflectStruct<'a> { crate::registration::impl_get_type_registration( self.meta(), where_clause_options, - Some(&self.serialization_denylist), + self.serialization_data(), ) } diff --git a/crates/bevy_reflect/bevy_reflect_derive/src/from_reflect.rs b/crates/bevy_reflect/bevy_reflect_derive/src/from_reflect.rs index 69525bd759210..bca7162de8b2d 100644 --- a/crates/bevy_reflect/bevy_reflect_derive/src/from_reflect.rs +++ b/crates/bevy_reflect/bevy_reflect_derive/src/from_reflect.rs @@ -189,7 +189,7 @@ fn get_ignored_fields(reflect_struct: &ReflectStruct) -> MemberValuePair { reflect_struct .ignored_fields() .map(|field| { - let member = ident_or_index(field.data.ident.as_ref(), field.index); + let member = ident_or_index(field.data.ident.as_ref(), field.declaration_index); let value = match &field.attrs.default { DefaultBehavior::Func(path) => quote! {#path()}, @@ -218,8 +218,12 @@ fn get_active_fields( reflect_struct .active_fields() .map(|field| { - let member = ident_or_index(field.data.ident.as_ref(), field.index); - let accessor = get_field_accessor(field.data, field.index, is_tuple); + let member = ident_or_index(field.data.ident.as_ref(), field.declaration_index); + let accessor = get_field_accessor( + field.data, + field.reflection_index.expect("field should be active"), + is_tuple, + ); let ty = field.data.ty.clone(); let get_field = quote! 
{ diff --git a/crates/bevy_reflect/bevy_reflect_derive/src/impls/enums.rs b/crates/bevy_reflect/bevy_reflect_derive/src/impls/enums.rs index 8eec84fcac678..a733ec2e262bf 100644 --- a/crates/bevy_reflect/bevy_reflect_derive/src/impls/enums.rs +++ b/crates/bevy_reflect/bevy_reflect_derive/src/impls/enums.rs @@ -346,7 +346,11 @@ fn generate_impls(reflect_enum: &ReflectEnum, ref_index: &Ident, ref_name: &Iden // Ignored field continue; } - constructor_argument.push(generate_for_field(reflect_idx, field.index, field)); + constructor_argument.push(generate_for_field( + reflect_idx, + field.declaration_index, + field, + )); reflect_idx += 1; } constructor_argument diff --git a/crates/bevy_reflect/bevy_reflect_derive/src/impls/structs.rs b/crates/bevy_reflect/bevy_reflect_derive/src/impls/structs.rs index 60a5c14cbc369..1bf46968cebdc 100644 --- a/crates/bevy_reflect/bevy_reflect_derive/src/impls/structs.rs +++ b/crates/bevy_reflect/bevy_reflect_derive/src/impls/structs.rs @@ -19,12 +19,12 @@ pub(crate) fn impl_struct(reflect_struct: &ReflectStruct) -> proc_macro2::TokenS .ident .as_ref() .map(|i| i.to_string()) - .unwrap_or_else(|| field.index.to_string()) + .unwrap_or_else(|| field.declaration_index.to_string()) }) .collect::>(); let field_idents = reflect_struct .active_fields() - .map(|field| ident_or_index(field.data.ident.as_ref(), field.index)) + .map(|field| ident_or_index(field.data.ident.as_ref(), field.declaration_index)) .collect::>(); let field_types = reflect_struct.active_types(); let field_count = field_idents.len(); diff --git a/crates/bevy_reflect/bevy_reflect_derive/src/impls/tuple_structs.rs b/crates/bevy_reflect/bevy_reflect_derive/src/impls/tuple_structs.rs index ed507f3714d10..e05226d7a52b6 100644 --- a/crates/bevy_reflect/bevy_reflect_derive/src/impls/tuple_structs.rs +++ b/crates/bevy_reflect/bevy_reflect_derive/src/impls/tuple_structs.rs @@ -14,7 +14,7 @@ pub(crate) fn impl_tuple_struct(reflect_struct: &ReflectStruct) -> proc_macro2:: let field_idents = reflect_struct .active_fields() - .map(|field| Member::Unnamed(Index::from(field.index))) + .map(|field| Member::Unnamed(Index::from(field.declaration_index))) .collect::>(); let field_types = reflect_struct.active_types(); let field_count = field_idents.len(); diff --git a/crates/bevy_reflect/bevy_reflect_derive/src/lib.rs b/crates/bevy_reflect/bevy_reflect_derive/src/lib.rs index 5474f143cda99..e87d3ccf5c8d3 100644 --- a/crates/bevy_reflect/bevy_reflect_derive/src/lib.rs +++ b/crates/bevy_reflect/bevy_reflect_derive/src/lib.rs @@ -24,6 +24,7 @@ mod from_reflect; mod impls; mod reflect_value; mod registration; +mod serialization; mod trait_reflection; mod type_path; mod type_uuid; @@ -201,8 +202,10 @@ pub fn derive_reflect(input: TokenStream) -> TokenStream { }; TokenStream::from(quote! { - #reflect_impls - #from_reflect_impl + const _: () = { + #reflect_impls + #from_reflect_impl + }; }) } @@ -241,15 +244,20 @@ pub fn derive_from_reflect(input: TokenStream) -> TokenStream { Err(err) => return err.into_compile_error().into(), }; - match derive_data { + let from_reflect_impl = match derive_data { ReflectDerive::Struct(struct_data) | ReflectDerive::UnitStruct(struct_data) => { from_reflect::impl_struct(&struct_data) } ReflectDerive::TupleStruct(struct_data) => from_reflect::impl_tuple_struct(&struct_data), ReflectDerive::Enum(meta) => from_reflect::impl_enum(&meta), ReflectDerive::Value(meta) => from_reflect::impl_value(&meta), - } - .into() + }; + + TokenStream::from(quote! 
{ + const _: () = { + #from_reflect_impl + }; + }) } /// Derives the `TypePath` trait, providing a stable alternative to [`std::any::type_name`]. @@ -275,21 +283,31 @@ pub fn derive_type_path(input: TokenStream) -> TokenStream { Err(err) => return err.into_compile_error().into(), }; - impls::impl_type_path( + let type_path_impl = impls::impl_type_path( derive_data.meta(), // Use `WhereClauseOptions::new_value` here so we don't enforce reflection bounds &WhereClauseOptions::new_value(derive_data.meta()), - ) - .into() + ); + + TokenStream::from(quote! { + const _: () = { + #type_path_impl + }; + }) } // From https://github.com/randomPoison/type-uuid #[proc_macro_derive(TypeUuid, attributes(uuid))] pub fn derive_type_uuid(input: TokenStream) -> TokenStream { let input = parse_macro_input!(input as DeriveInput); - type_uuid::type_uuid_derive(input) - .unwrap_or_else(syn::Error::into_compile_error) - .into() + let uuid_impl = + type_uuid::type_uuid_derive(input).unwrap_or_else(syn::Error::into_compile_error); + + TokenStream::from(quote! { + const _: () = { + #uuid_impl + }; + }) } /// A macro that automatically generates type data for traits, which their implementors can then register. @@ -401,8 +419,10 @@ pub fn impl_reflect_value(input: TokenStream) -> TokenStream { let from_reflect_impl = from_reflect::impl_value(&meta); TokenStream::from(quote! { - #reflect_impls - #from_reflect_impl + const _: () = { + #reflect_impls + #from_reflect_impl + }; }) } @@ -446,7 +466,7 @@ pub fn impl_reflect_struct(input: TokenStream) -> TokenStream { Err(err) => return err.into_compile_error().into(), }; - match derive_data { + let output = match derive_data { ReflectDerive::Struct(struct_data) => { if !struct_data.meta().type_path().has_custom_path() { return syn::Error::new( @@ -460,27 +480,30 @@ pub fn impl_reflect_struct(input: TokenStream) -> TokenStream { let impl_struct = impls::impl_struct(&struct_data); let impl_from_struct = from_reflect::impl_struct(&struct_data); - TokenStream::from(quote! { + quote! { #impl_struct #impl_from_struct - }) + } } ReflectDerive::TupleStruct(..) => syn::Error::new( ast.span(), "impl_reflect_struct does not support tuple structs", ) - .into_compile_error() - .into(), + .into_compile_error(), ReflectDerive::UnitStruct(..) => syn::Error::new( ast.span(), "impl_reflect_struct does not support unit structs", ) - .into_compile_error() - .into(), + .into_compile_error(), _ => syn::Error::new(ast.span(), "impl_reflect_struct only supports structs") - .into_compile_error() - .into(), - } + .into_compile_error(), + }; + + TokenStream::from(quote! { + const _: () = { + #output + }; + }) } /// A macro used to generate a `FromReflect` trait implementation for the given type. @@ -521,7 +544,14 @@ pub fn impl_from_reflect_value(input: TokenStream) -> TokenStream { } }; - from_reflect::impl_value(&ReflectMeta::new(type_path, def.traits.unwrap_or_default())).into() + let from_reflect_impl = + from_reflect::impl_value(&ReflectMeta::new(type_path, def.traits.unwrap_or_default())); + + TokenStream::from(quote! { + const _: () = { + #from_reflect_impl + }; + }) } /// A replacement for [deriving `TypePath`] for use on foreign types. @@ -583,12 +613,24 @@ pub fn impl_type_path(input: TokenStream) -> TokenStream { let meta = ReflectMeta::new(type_path, ReflectTraits::default()); - impls::impl_type_path(&meta, &WhereClauseOptions::new_value(&meta)).into() + let type_path_impl = impls::impl_type_path(&meta, &WhereClauseOptions::new_value(&meta)); + + TokenStream::from(quote! 
{ + const _: () = { + #type_path_impl + }; + }) } /// Derives `TypeUuid` for the given type. This is used internally to implement `TypeUuid` on foreign types, such as those in the std. This macro should be used in the format of `<[Generic Params]> [Type (Path)], [Uuid (String Literal)]`. #[proc_macro] pub fn impl_type_uuid(input: TokenStream) -> TokenStream { let def = parse_macro_input!(input as type_uuid::TypeUuidDef); - type_uuid::gen_impl_type_uuid(def).into() + let uuid_impl = type_uuid::gen_impl_type_uuid(def); + + TokenStream::from(quote! { + const _: () = { + #uuid_impl + }; + }) } diff --git a/crates/bevy_reflect/bevy_reflect_derive/src/registration.rs b/crates/bevy_reflect/bevy_reflect_derive/src/registration.rs index 0b0a31e0a38fd..115274ad46ae1 100644 --- a/crates/bevy_reflect/bevy_reflect_derive/src/registration.rs +++ b/crates/bevy_reflect/bevy_reflect_derive/src/registration.rs @@ -1,8 +1,8 @@ //! Contains code related specifically to Bevy's type registration. use crate::derive_data::ReflectMeta; +use crate::serialization::SerializationDataDef; use crate::utility::{extend_where_clause, WhereClauseOptions}; -use bit_set::BitSet; use quote::quote; /// Creates the `GetTypeRegistration` impl for the given type data. @@ -10,7 +10,7 @@ use quote::quote; pub(crate) fn impl_get_type_registration( meta: &ReflectMeta, where_clause_options: &WhereClauseOptions, - serialization_denylist: Option<&BitSet>, + serialization_data: Option<&SerializationDataDef>, ) -> proc_macro2::TokenStream { let type_path = meta.type_path(); let bevy_reflect_path = meta.bevy_reflect_path(); @@ -20,17 +20,16 @@ pub(crate) fn impl_get_type_registration( let from_reflect_data = if meta.from_reflect().should_auto_derive() { Some(quote! { - registration.insert::<#bevy_reflect_path::ReflectFromReflect>(#bevy_reflect_path::FromType::::from_type()); + registration.insert::<#bevy_reflect_path::ReflectFromReflect>(#bevy_reflect_path::FromType::::from_type()); }) } else { None }; - let serialization_data = serialization_denylist.map(|denylist| { - let denylist = denylist.into_iter(); + let serialization_data = serialization_data.map(|data| { + let serialization_data = data.as_serialization_data(bevy_reflect_path); quote! { - let ignored_indices = ::core::iter::IntoIterator::into_iter([#(#denylist),*]); - registration.insert::<#bevy_reflect_path::serde::SerializationData>(#bevy_reflect_path::serde::SerializationData::new(ignored_indices)); + registration.insert::<#bevy_reflect_path::serde::SerializationData>(#serialization_data); } }); diff --git a/crates/bevy_reflect/bevy_reflect_derive/src/serialization.rs b/crates/bevy_reflect/bevy_reflect_derive/src/serialization.rs new file mode 100644 index 0000000000000..0242947b5c91f --- /dev/null +++ b/crates/bevy_reflect/bevy_reflect_derive/src/serialization.rs @@ -0,0 +1,91 @@ +use crate::derive_data::StructField; +use crate::field_attributes::{DefaultBehavior, ReflectIgnoreBehavior}; +use bevy_macro_utils::fq_std::{FQBox, FQDefault}; +use quote::quote; +use std::collections::HashMap; +use syn::spanned::Spanned; +use syn::Path; + +type ReflectionIndex = usize; + +/// Collected serialization data used to generate a `SerializationData` type. +pub(crate) struct SerializationDataDef { + /// Maps a field's _reflection_ index to its [`SkippedFieldDef`] if marked as `#[reflect(skip_serializing)]`. + skipped: HashMap, +} + +impl SerializationDataDef { + /// Attempts to create a new `SerializationDataDef` from the given collection of fields. 
+ /// + /// Returns `Ok(Some(data))` if there are any fields needing to be skipped during serialization. + /// Otherwise, returns `Ok(None)`. + pub fn new(fields: &[StructField<'_>]) -> Result, syn::Error> { + let mut skipped = HashMap::default(); + + for field in fields { + match field.attrs.ignore { + ReflectIgnoreBehavior::IgnoreSerialization => { + skipped.insert( + field.reflection_index.ok_or_else(|| { + syn::Error::new( + field.data.span(), + "internal error: field is missing a reflection index", + ) + })?, + SkippedFieldDef::new(field)?, + ); + } + _ => continue, + } + } + + if skipped.is_empty() { + Ok(None) + } else { + Ok(Some(Self { skipped })) + } + } + + /// Returns a `TokenStream` containing an initialized `SerializationData` type. + pub fn as_serialization_data(&self, bevy_reflect_path: &Path) -> proc_macro2::TokenStream { + let fields = + self.skipped + .iter() + .map(|(reflection_index, SkippedFieldDef { default_fn })| { + quote! {( + #reflection_index, + #bevy_reflect_path::serde::SkippedField::new(#default_fn) + )} + }); + quote! { + #bevy_reflect_path::serde::SerializationData::new( + ::core::iter::IntoIterator::into_iter([#(#fields),*]) + ) + } + } +} + +/// Collected field data used to generate a `SkippedField` type. +pub(crate) struct SkippedFieldDef { + /// The default function for this field. + /// + /// This is of type `fn() -> Box`. + default_fn: proc_macro2::TokenStream, +} + +impl SkippedFieldDef { + pub fn new(field: &StructField<'_>) -> Result { + let ty = &field.data.ty; + + let default_fn = match &field.attrs.default { + DefaultBehavior::Func(func) => quote! { + || { #FQBox::new(#func()) } + }, + _ => quote! { + || { #FQBox::new(<#ty as #FQDefault>::default()) } + }, + }; + + Ok(Self { default_fn }) + } +} diff --git a/crates/bevy_reflect/bevy_reflect_derive/src/utility.rs b/crates/bevy_reflect/bevy_reflect_derive/src/utility.rs index 9d25e35a37533..0cd4c88b4cae9 100644 --- a/crates/bevy_reflect/bevy_reflect_derive/src/utility.rs +++ b/crates/bevy_reflect/bevy_reflect_derive/src/utility.rs @@ -1,12 +1,10 @@ //! General-purpose utility functions for internal usage within this crate. use crate::derive_data::{ReflectMeta, StructField}; -use crate::field_attributes::ReflectIgnoreBehavior; use bevy_macro_utils::{ fq_std::{FQAny, FQOption, FQSend, FQSync}, BevyManifest, }; -use bit_set::BitSet; use proc_macro2::{Ident, Span}; use quote::{quote, ToTokens}; use syn::{spanned::Spanned, LitStr, Member, Path, Type, WhereClause}; @@ -286,42 +284,6 @@ impl ResultSifter { } } -/// Converts an iterator over ignore behavior of members to a bitset of ignored members. -/// -/// Takes into account the fact that always ignored (non-reflected) members are skipped. -/// -/// # Example -/// ```rust,ignore -/// pub struct HelloWorld { -/// reflected_field: u32 // index: 0 -/// -/// #[reflect(ignore)] -/// non_reflected_field: u32 // index: N/A (not 1!) 
-/// -/// #[reflect(skip_serializing)] -/// non_serialized_field: u32 // index: 1 -/// } -/// ``` -/// Would convert to the `0b01` bitset (i.e second field is NOT serialized) -/// -pub(crate) fn members_to_serialization_denylist(member_iter: T) -> BitSet -where - T: Iterator, -{ - let mut bitset = BitSet::default(); - - member_iter.fold(0, |next_idx, member| match member { - ReflectIgnoreBehavior::IgnoreAlways => next_idx, - ReflectIgnoreBehavior::IgnoreSerialization => { - bitset.insert(next_idx); - next_idx + 1 - } - ReflectIgnoreBehavior::None => next_idx + 1, - }); - - bitset -} - /// Turns an `Option` into a `TokenStream` for an `Option`. pub(crate) fn wrap_in_option(tokens: Option) -> proc_macro2::TokenStream { match tokens { diff --git a/crates/bevy_reflect/src/lib.rs b/crates/bevy_reflect/src/lib.rs index b01adece410bb..1a02cf4ed838d 100644 --- a/crates/bevy_reflect/src/lib.rs +++ b/crates/bevy_reflect/src/lib.rs @@ -764,6 +764,39 @@ mod tests { .unwrap_or_default()); } + #[test] + fn from_reflect_should_allow_ignored_unnamed_fields() { + #[derive(Reflect, Eq, PartialEq, Debug)] + struct MyTupleStruct(i8, #[reflect(ignore)] i16, i32); + + let expected = MyTupleStruct(1, 0, 3); + + let mut dyn_tuple_struct = DynamicTupleStruct::default(); + dyn_tuple_struct.insert(1_i8); + dyn_tuple_struct.insert(3_i32); + let my_tuple_struct = ::from_reflect(&dyn_tuple_struct); + + assert_eq!(Some(expected), my_tuple_struct); + + #[derive(Reflect, Eq, PartialEq, Debug)] + enum MyEnum { + Tuple(i8, #[reflect(ignore)] i16, i32), + } + + let expected = MyEnum::Tuple(1, 0, 3); + + let mut dyn_tuple = DynamicTuple::default(); + dyn_tuple.insert(1_i8); + dyn_tuple.insert(3_i32); + + let mut dyn_enum = DynamicEnum::default(); + dyn_enum.set_variant("Tuple", dyn_tuple); + + let my_enum = ::from_reflect(&dyn_enum); + + assert_eq!(Some(expected), my_enum); + } + #[test] fn from_reflect_should_use_default_field_attributes() { #[derive(Reflect, Eq, PartialEq, Debug)] diff --git a/crates/bevy_reflect/src/serde/de.rs b/crates/bevy_reflect/src/serde/de.rs index 38f1795186d9a..170c6c941cf1f 100644 --- a/crates/bevy_reflect/src/serde/de.rs +++ b/crates/bevy_reflect/src/serde/de.rs @@ -2,9 +2,8 @@ use crate::serde::SerializationData; use crate::{ ArrayInfo, DynamicArray, DynamicEnum, DynamicList, DynamicMap, DynamicStruct, DynamicTuple, DynamicTupleStruct, DynamicVariant, EnumInfo, ListInfo, Map, MapInfo, NamedField, Reflect, - ReflectDeserialize, StructInfo, StructVariantInfo, Tuple, TupleInfo, TupleStruct, - TupleStructInfo, TupleVariantInfo, TypeInfo, TypeRegistration, TypeRegistry, UnnamedField, - VariantInfo, + ReflectDeserialize, StructInfo, StructVariantInfo, TupleInfo, TupleStructInfo, + TupleVariantInfo, TypeInfo, TypeRegistration, TypeRegistry, UnnamedField, VariantInfo, }; use erased_serde::Deserializer; use serde::de::{ @@ -27,6 +26,8 @@ pub trait DeserializeValue { trait StructLikeInfo { fn get_path(&self) -> &str; fn get_field(&self, name: &str) -> Option<&NamedField>; + fn field_at(&self, index: usize) -> Option<&NamedField>; + fn get_field_len(&self) -> usize; fn iter_fields(&self) -> Iter<'_, NamedField>; } @@ -49,10 +50,18 @@ impl StructLikeInfo for StructInfo { self.type_path() } + fn field_at(&self, index: usize) -> Option<&NamedField> { + self.field_at(index) + } + fn get_field(&self, name: &str) -> Option<&NamedField> { self.field(name) } + fn get_field_len(&self) -> usize { + self.field_len() + } + fn iter_fields(&self) -> Iter<'_, NamedField> { self.iter() } @@ -80,10 +89,18 @@ impl 
StructLikeInfo for StructVariantInfo { self.name() } + fn field_at(&self, index: usize) -> Option<&NamedField> { + self.field_at(index) + } + fn get_field(&self, name: &str) -> Option<&NamedField> { self.field(name) } + fn get_field_len(&self) -> usize { + self.field_len() + } + fn iter_fields(&self) -> Iter<'_, NamedField> { self.iter() } @@ -120,6 +137,54 @@ impl TupleLikeInfo for TupleInfo { } } +impl Container for TupleInfo { + fn get_field_registration<'a, E: Error>( + &self, + index: usize, + registry: &'a TypeRegistry, + ) -> Result<&'a TypeRegistration, E> { + let field = self.field_at(index).ok_or_else(|| { + de::Error::custom(format_args!( + "no field at index {} on tuple {}", + index, + self.type_path(), + )) + })?; + get_registration(field.type_id(), field.type_path(), registry) + } +} + +impl TupleLikeInfo for TupleStructInfo { + fn get_path(&self) -> &str { + self.type_path() + } + + fn get_field(&self, index: usize) -> Option<&UnnamedField> { + self.field_at(index) + } + + fn get_field_len(&self) -> usize { + self.field_len() + } +} + +impl Container for TupleStructInfo { + fn get_field_registration<'a, E: Error>( + &self, + index: usize, + registry: &'a TypeRegistry, + ) -> Result<&'a TypeRegistration, E> { + let field = self.field_at(index).ok_or_else(|| { + de::Error::custom(format_args!( + "no field at index {} on tuple struct {}", + index, + self.type_path(), + )) + })?; + get_registration(field.type_id(), field.type_path(), registry) + } +} + impl TupleLikeInfo for TupleVariantInfo { fn get_path(&self) -> &str { self.name() @@ -134,6 +199,23 @@ impl TupleLikeInfo for TupleVariantInfo { } } +impl Container for TupleVariantInfo { + fn get_field_registration<'a, E: Error>( + &self, + index: usize, + registry: &'a TypeRegistry, + ) -> Result<&'a TypeRegistration, E> { + let field = self.field_at(index).ok_or_else(|| { + de::Error::custom(format_args!( + "no field at index {} on tuple variant {}", + index, + self.name(), + )) + })?; + get_registration(field.type_id(), field.type_path(), registry) + } +} + /// A debug struct used for error messages that displays a list of expected values. /// /// # Example @@ -444,6 +526,7 @@ impl<'a, 'de> DeserializeSeed<'de> for TypedReflectDeserializer<'a> { tuple_info.field_len(), TupleVisitor { tuple_info, + registration: self.registration, registry: self.registry, }, )?; @@ -500,43 +583,14 @@ impl<'a, 'de> Visitor<'de> for StructVisitor<'a> { where V: MapAccess<'de>, { - visit_struct(&mut map, self.struct_info, self.registry) + visit_struct(&mut map, self.struct_info, self.registration, self.registry) } fn visit_seq
(self, mut seq: A) -> Result where A: SeqAccess<'de>, { - let mut index = 0usize; - let mut output = DynamicStruct::default(); - - let ignored_len = self - .registration - .data::() - .map(|data| data.len()) - .unwrap_or(0); - let field_len = self.struct_info.field_len().saturating_sub(ignored_len); - - if field_len == 0 { - // Handle unit structs and ignored fields - return Ok(output); - } - - while let Some(value) = seq.next_element_seed(TypedReflectDeserializer { - registration: self - .struct_info - .get_field_registration(index, self.registry)?, - registry: self.registry, - })? { - let name = self.struct_info.field_at(index).unwrap().name(); - output.insert_boxed(name, value); - index += 1; - if index >= self.struct_info.field_len() { - break; - } - } - - Ok(output) + visit_struct_seq(&mut seq, self.struct_info, self.registration, self.registry) } } @@ -557,64 +611,19 @@ impl<'a, 'de> Visitor<'de> for TupleStructVisitor<'a> { where V: SeqAccess<'de>, { - let mut index = 0usize; - let mut tuple_struct = DynamicTupleStruct::default(); - - let ignored_len = self - .registration - .data::() - .map(|data| data.len()) - .unwrap_or(0); - let field_len = self - .tuple_struct_info - .field_len() - .saturating_sub(ignored_len); - - if field_len == 0 { - // Handle unit structs and ignored fields - return Ok(tuple_struct); - } - - let get_field_registration = |index: usize| -> Result<&'a TypeRegistration, V::Error> { - let field = self.tuple_struct_info.field_at(index).ok_or_else(|| { - de::Error::custom(format_args!( - "no field at index {} on tuple {}", - index, - self.tuple_struct_info.type_path(), - )) - })?; - get_registration(field.type_id(), field.type_path(), self.registry) - }; - - while let Some(value) = seq.next_element_seed(TypedReflectDeserializer { - registration: get_field_registration(index)?, - registry: self.registry, - })? { - tuple_struct.insert_boxed(value); - index += 1; - if index >= self.tuple_struct_info.field_len() { - break; - } - } - - let ignored_len = self - .registration - .data::() - .map(|data| data.len()) - .unwrap_or(0); - if tuple_struct.field_len() != self.tuple_struct_info.field_len() - ignored_len { - return Err(Error::invalid_length( - tuple_struct.field_len(), - &self.tuple_struct_info.field_len().to_string().as_str(), - )); - } - - Ok(tuple_struct) + visit_tuple( + &mut seq, + self.tuple_struct_info, + self.registration, + self.registry, + ) + .map(DynamicTupleStruct::from) } } struct TupleVisitor<'a> { tuple_info: &'static TupleInfo, + registration: &'a TypeRegistration, registry: &'a TypeRegistry, } @@ -629,7 +638,7 @@ impl<'a, 'de> Visitor<'de> for TupleVisitor<'a> { where V: SeqAccess<'de>, { - visit_tuple(&mut seq, self.tuple_info, self.registry) + visit_tuple(&mut seq, self.tuple_info, self.registration, self.registry) } } @@ -782,9 +791,7 @@ impl<'a, 'de> Visitor<'de> for EnumVisitor<'a> { )? 
.into(), VariantInfo::Tuple(tuple_info) if tuple_info.field_len() == 1 => { - let field = tuple_info.field_at(0).unwrap(); - let registration = - get_registration(field.type_id(), field.type_path(), self.registry)?; + let registration = tuple_info.get_field_registration(0, self.registry)?; let value = variant.newtype_variant_seed(TypedReflectDeserializer { registration, registry: self.registry, @@ -879,43 +886,14 @@ impl<'a, 'de> Visitor<'de> for StructVariantVisitor<'a> { where V: MapAccess<'de>, { - visit_struct(&mut map, self.struct_info, self.registry) + visit_struct(&mut map, self.struct_info, self.registration, self.registry) } fn visit_seq(self, mut seq: A) -> Result where A: SeqAccess<'de>, { - let mut index = 0usize; - let mut output = DynamicStruct::default(); - - let ignored_len = self - .registration - .data::() - .map(|data| data.len()) - .unwrap_or(0); - let field_len = self.struct_info.field_len().saturating_sub(ignored_len); - - if field_len == 0 { - // Handle all fields being ignored - return Ok(output); - } - - while let Some(value) = seq.next_element_seed(TypedReflectDeserializer { - registration: self - .struct_info - .get_field_registration(index, self.registry)?, - registry: self.registry, - })? { - let name = self.struct_info.field_at(index).unwrap().name(); - output.insert_boxed(name, value); - index += 1; - if index >= self.struct_info.field_len() { - break; - } - } - - Ok(output) + visit_struct_seq(&mut seq, self.struct_info, self.registration, self.registry) } } @@ -936,19 +914,7 @@ impl<'a, 'de> Visitor<'de> for TupleVariantVisitor<'a> { where V: SeqAccess<'de>, { - let ignored_len = self - .registration - .data::() - .map(|data| data.len()) - .unwrap_or(0); - let field_len = self.tuple_info.field_len().saturating_sub(ignored_len); - - if field_len == 0 { - // Handle all fields being ignored - return Ok(DynamicTuple::default()); - } - - visit_tuple(&mut seq, self.tuple_info, self.registry) + visit_tuple(&mut seq, self.tuple_info, self.registration, self.registry) } } @@ -1005,6 +971,7 @@ impl<'a, 'de> Visitor<'de> for OptionVisitor<'a> { fn visit_struct<'de, T, V>( map: &mut V, info: &'static T, + registration: &TypeRegistration, registry: &TypeRegistry, ) -> Result where @@ -1029,49 +996,101 @@ where dynamic_struct.insert_boxed(&key, value); } + if let Some(serialization_data) = registration.data::() { + for (skipped_index, skipped_field) in serialization_data.iter_skipped() { + let Some(field) = info.field_at(*skipped_index) else { + continue; + }; + dynamic_struct.insert_boxed(field.name(), skipped_field.generate_default()); + } + } + Ok(dynamic_struct) } fn visit_tuple<'de, T, V>( seq: &mut V, info: &T, + registration: &TypeRegistration, registry: &TypeRegistry, ) -> Result where - T: TupleLikeInfo, + T: TupleLikeInfo + Container, V: SeqAccess<'de>, { let mut tuple = DynamicTuple::default(); - let mut index = 0usize; - let get_field_registration = |index: usize| -> Result<&TypeRegistration, V::Error> { - let field = info.get_field(index).ok_or_else(|| { - Error::invalid_length(index, &info.get_field_len().to_string().as_str()) - })?; - get_registration(field.type_id(), field.type_path(), registry) - }; + let len = info.get_field_len(); - while let Some(value) = seq.next_element_seed(TypedReflectDeserializer { - registration: get_field_registration(index)?, - registry, - })? 
{ - tuple.insert_boxed(value); - index += 1; - if index >= info.get_field_len() { - break; + if len == 0 { + // Handle empty tuple/tuple struct + return Ok(tuple); + } + + let serialization_data = registration.data::(); + + for index in 0..len { + if let Some(value) = serialization_data.and_then(|data| data.generate_default(index)) { + tuple.insert_boxed(value); + continue; } + + let value = seq + .next_element_seed(TypedReflectDeserializer { + registration: info.get_field_registration(index, registry)?, + registry, + })? + .ok_or_else(|| Error::invalid_length(index, &len.to_string().as_str()))?; + tuple.insert_boxed(value); } + Ok(tuple) +} + +fn visit_struct_seq<'de, T, V>( + seq: &mut V, + info: &T, + registration: &TypeRegistration, + registry: &TypeRegistry, +) -> Result +where + T: StructLikeInfo + Container, + V: SeqAccess<'de>, +{ + let mut dynamic_struct = DynamicStruct::default(); + let len = info.get_field_len(); - if tuple.field_len() != len { - return Err(Error::invalid_length( - tuple.field_len(), - &len.to_string().as_str(), - )); + if len == 0 { + // Handle unit structs + return Ok(dynamic_struct); } - Ok(tuple) + let serialization_data = registration.data::(); + + for index in 0..len { + let name = info.field_at(index).unwrap().name(); + + if serialization_data + .map(|data| data.is_field_skipped(index)) + .unwrap_or_default() + { + if let Some(value) = serialization_data.unwrap().generate_default(index) { + dynamic_struct.insert_boxed(name, value); + } + continue; + } + + let value = seq + .next_element_seed(TypedReflectDeserializer { + registration: info.get_field_registration(index, registry)?, + registry, + })? + .ok_or_else(|| Error::invalid_length(index, &len.to_string().as_str()))?; + dynamic_struct.insert_boxed(name, value); + } + + Ok(dynamic_struct) } fn get_registration<'a, E: Error>( diff --git a/crates/bevy_reflect/src/serde/mod.rs b/crates/bevy_reflect/src/serde/mod.rs index 5f87eba8f304b..c444279fa928a 100644 --- a/crates/bevy_reflect/src/serde/mod.rs +++ b/crates/bevy_reflect/src/serde/mod.rs @@ -12,7 +12,7 @@ mod tests { use crate::{ serde::{ReflectSerializer, UntypedReflectDeserializer}, type_registry::TypeRegistry, - DynamicStruct, Reflect, + DynamicStruct, FromReflect, Reflect, }; use serde::de::DeserializeSeed; @@ -26,7 +26,14 @@ mod tests { b: i32, #[reflect(skip_serializing)] c: i32, + #[reflect(skip_serializing)] + #[reflect(default = "custom_default")] d: i32, + e: i32, + } + + fn custom_default() -> i32 { + -1 } let mut registry = TypeRegistry::default(); @@ -37,24 +44,42 @@ mod tests { b: 4, c: 5, d: 6, + e: 7, }; let serializer = ReflectSerializer::new(&test_struct, ®istry); let serialized = ron::ser::to_string_pretty(&serializer, ron::ser::PrettyConfig::default()).unwrap(); - let mut expected = DynamicStruct::default(); - expected.insert("a", 3); - expected.insert("d", 6); - let mut deserializer = ron::de::Deserializer::from_str(&serialized).unwrap(); let reflect_deserializer = UntypedReflectDeserializer::new(®istry); let value = reflect_deserializer.deserialize(&mut deserializer).unwrap(); let deserialized = value.take::().unwrap(); + let mut expected = DynamicStruct::default(); + expected.insert("a", 3); + // Ignored: expected.insert("b", 0); + expected.insert("c", 0); + expected.insert("d", -1); + expected.insert("e", 7); + assert!( expected.reflect_partial_eq(&deserialized).unwrap(), - "Expected {expected:?} found {deserialized:?}" + "Deserialization failed: expected {expected:?} found {deserialized:?}" + ); + + let expected = TestStruct { 
+ a: 3, + b: 0, + c: 0, + d: -1, + e: 7, + }; + let received = ::from_reflect(&deserialized).unwrap(); + + assert_eq!( + expected, received, + "FromReflect failed: expected {expected:?} found {received:?}" ); } @@ -66,30 +91,48 @@ mod tests { i32, #[reflect(ignore)] i32, #[reflect(skip_serializing)] i32, + #[reflect(skip_serializing)] + #[reflect(default = "custom_default")] + i32, i32, ); + fn custom_default() -> i32 { + -1 + } + let mut registry = TypeRegistry::default(); registry.register::(); - let test_struct = TestStruct(3, 4, 5, 6); + let test_struct = TestStruct(3, 4, 5, 6, 7); let serializer = ReflectSerializer::new(&test_struct, ®istry); let serialized = ron::ser::to_string_pretty(&serializer, ron::ser::PrettyConfig::default()).unwrap(); - let mut expected = DynamicTupleStruct::default(); - expected.insert(3); - expected.insert(6); - let mut deserializer = ron::de::Deserializer::from_str(&serialized).unwrap(); let reflect_deserializer = UntypedReflectDeserializer::new(®istry); let value = reflect_deserializer.deserialize(&mut deserializer).unwrap(); let deserialized = value.take::().unwrap(); + let mut expected = DynamicTupleStruct::default(); + expected.insert(3); + // Ignored: expected.insert(0); + expected.insert(0); + expected.insert(-1); + expected.insert(7); + assert!( expected.reflect_partial_eq(&deserialized).unwrap(), - "Expected {expected:?} found {deserialized:?}" + "Deserialization failed: expected {expected:?} found {deserialized:?}" + ); + + let expected = TestStruct(3, 0, 0, -1, 7); + let received = ::from_reflect(&deserialized).unwrap(); + + assert_eq!( + expected, received, + "FromReflect failed: expected {expected:?} found {received:?}" ); } diff --git a/crates/bevy_reflect/src/serde/ser.rs b/crates/bevy_reflect/src/serde/ser.rs index 79ec73099a457..fc072a8d2de18 100644 --- a/crates/bevy_reflect/src/serde/ser.rs +++ b/crates/bevy_reflect/src/serde/ser.rs @@ -212,7 +212,7 @@ impl<'a> Serialize for StructSerializer<'a> { for (index, value) in self.struct_value.iter_fields().enumerate() { if serialization_data - .map(|data| data.is_ignored_field(index)) + .map(|data| data.is_field_skipped(index)) .unwrap_or(false) { continue; @@ -265,7 +265,7 @@ impl<'a> Serialize for TupleStructSerializer<'a> { for (index, value) in self.tuple_struct.iter_fields().enumerate() { if serialization_data - .map(|data| data.is_ignored_field(index)) + .map(|data| data.is_field_skipped(index)) .unwrap_or(false) { continue; diff --git a/crates/bevy_reflect/src/serde/type_data.rs b/crates/bevy_reflect/src/serde/type_data.rs index ee69a390d09cb..d82f3b4579095 100644 --- a/crates/bevy_reflect/src/serde/type_data.rs +++ b/crates/bevy_reflect/src/serde/type_data.rs @@ -1,44 +1,136 @@ -use std::collections::HashSet; +use crate::Reflect; +use bevy_utils::hashbrown::hash_map::Iter; +use bevy_utils::HashMap; -/// Contains data relevant to the automatic reflect powered serialization of a type +/// Contains data relevant to the automatic reflect powered (de)serialization of a type. #[derive(Debug, Clone)] pub struct SerializationData { - ignored_field_indices: HashSet, + skipped_fields: HashMap, } impl SerializationData { - /// Creates a new `SerializationData` instance given: + /// Creates a new `SerializationData` instance with the given skipped fields. /// - /// - `ignored_iter`: the iterator of member indices to be ignored during serialization. Indices are assigned only to reflected members, those which are not reflected are skipped. 
- pub fn new>(ignored_iter: I) -> Self { + /// # Arguments + /// + /// * `skipped_iter`: The iterator of field indices to be skipped during (de)serialization. + /// Indices are assigned only to reflected fields. + /// Ignored fields (i.e. those marked `#[reflect(ignore)]`) are implicitly skipped + /// and do not need to be included in this iterator. + pub fn new>(skipped_iter: I) -> Self { Self { - ignored_field_indices: ignored_iter.collect(), + skipped_fields: skipped_iter.collect(), } } - /// Returns true if the given index corresponds to a field meant to be ignored in serialization. - /// - /// Indices start from 0 and ignored fields are skipped. + /// Returns true if the given index corresponds to a field meant to be skipped during (de)serialization. /// /// # Example /// - /// ```rust,ignore + /// ``` + /// # use std::any::TypeId; + /// # use bevy_reflect::{Reflect, Struct, TypeRegistry, serde::SerializationData}; + /// #[derive(Reflect)] + /// struct MyStruct { + /// serialize_me: i32, + /// #[reflect(skip_serializing)] + /// skip_me: i32 + /// } + /// + /// let mut registry = TypeRegistry::new(); + /// registry.register::(); + /// + /// let my_struct = MyStruct { + /// serialize_me: 123, + /// skip_me: 321, + /// }; + /// + /// let serialization_data = registry.get_type_data::(TypeId::of::()).unwrap(); + /// /// for (idx, field) in my_struct.iter_fields().enumerate(){ - /// if serialization_data.is_ignored_field(idx){ - /// // serialize ... - /// } + /// if serialization_data.is_field_skipped(idx) { + /// // Skipped! + /// assert_eq!(1, idx); + /// } else { + /// // Not Skipped! + /// assert_eq!(0, idx); + /// } + /// } + /// ``` + pub fn is_field_skipped(&self, index: usize) -> bool { + self.skipped_fields.contains_key(&index) + } + + /// Generates a default instance of the skipped field at the given index. + /// + /// Returns `None` if the field is not skipped. + /// + /// # Example + /// + /// ``` + /// # use std::any::TypeId; + /// # use bevy_reflect::{Reflect, Struct, TypeRegistry, serde::SerializationData}; + /// #[derive(Reflect)] + /// struct MyStruct { + /// serialize_me: i32, + /// #[reflect(skip_serializing)] + /// #[reflect(default = "skip_me_default")] + /// skip_me: i32 /// } + /// + /// fn skip_me_default() -> i32 { + /// 789 + /// } + /// + /// let mut registry = TypeRegistry::new(); + /// registry.register::(); + /// + /// let serialization_data = registry.get_type_data::(TypeId::of::()).unwrap(); + /// assert_eq!(789, serialization_data.generate_default(1).unwrap().take::().unwrap()); /// ``` - pub fn is_ignored_field(&self, index: usize) -> bool { - self.ignored_field_indices.contains(&index) + pub fn generate_default(&self, index: usize) -> Option> { + self.skipped_fields + .get(&index) + .map(|field| field.generate_default()) } - /// Returns the number of ignored fields. + /// Returns the number of skipped fields. pub fn len(&self) -> usize { - self.ignored_field_indices.len() + self.skipped_fields.len() } - /// Returns true if there are no ignored fields. + /// Returns true if there are no skipped fields. pub fn is_empty(&self) -> bool { - self.ignored_field_indices.is_empty() + self.skipped_fields.is_empty() + } + + /// Returns an iterator over the skipped fields. + /// + /// Each item in the iterator is a tuple containing: + /// 1. The reflected index of the field + /// 2. 
The (de)serialization metadata of the field + pub fn iter_skipped(&self) -> Iter<'_, usize, SkippedField> { + self.skipped_fields.iter() + } +} + +/// Data needed for (de)serialization of a skipped field. +#[derive(Debug, Clone)] +pub struct SkippedField { + default_fn: fn() -> Box, +} + +impl SkippedField { + /// Create a new `SkippedField`. + /// + /// # Arguments + /// + /// * `default_fn`: A function pointer used to generate a default instance of the field. + pub fn new(default_fn: fn() -> Box) -> Self { + Self { default_fn } + } + + /// Generates a default instance of the field. + pub fn generate_default(&self) -> Box { + (self.default_fn)() } } diff --git a/crates/bevy_reflect/src/tuple_struct.rs b/crates/bevy_reflect/src/tuple_struct.rs index 9d12490871980..ff9c53d5481aa 100644 --- a/crates/bevy_reflect/src/tuple_struct.rs +++ b/crates/bevy_reflect/src/tuple_struct.rs @@ -1,8 +1,8 @@ use bevy_reflect_derive::impl_type_path; use crate::{ - self as bevy_reflect, Reflect, ReflectMut, ReflectOwned, ReflectRef, TypeInfo, TypePath, - TypePathTable, UnnamedField, + self as bevy_reflect, DynamicTuple, Reflect, ReflectMut, ReflectOwned, ReflectRef, Tuple, + TypeInfo, TypePath, TypePathTable, UnnamedField, }; use std::any::{Any, TypeId}; use std::fmt::{Debug, Formatter}; @@ -390,6 +390,15 @@ impl Debug for DynamicTupleStruct { } } +impl From for DynamicTupleStruct { + fn from(value: DynamicTuple) -> Self { + Self { + represented_type: None, + fields: Box::new(value).drain(), + } + } +} + /// Compares a [`TupleStruct`] with a [`Reflect`] value. /// /// Returns true if and only if all of the following are true: From f9ef989def5a77d0e0a37c70a0a26939e0b9b3a3 Mon Sep 17 00:00:00 2001 From: anarelion Date: Sun, 22 Oct 2023 23:59:39 +0100 Subject: [PATCH 53/63] Implement source into Display for AssetPath (#10217) # Objective When debugging and printing asset paths, I am not 100% if I am in the right source or not ## Solution Add the output of the source --- crates/bevy_asset/src/path.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/crates/bevy_asset/src/path.rs b/crates/bevy_asset/src/path.rs index 43b7d2cab5c77..a6cf38db73848 100644 --- a/crates/bevy_asset/src/path.rs +++ b/crates/bevy_asset/src/path.rs @@ -67,6 +67,9 @@ impl<'a> Debug for AssetPath<'a> { impl<'a> Display for AssetPath<'a> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + if let AssetSourceId::Name(name) = self.source() { + write!(f, "{name}://")?; + } write!(f, "{}", self.path.display())?; if let Some(label) = &self.label { write!(f, "#{label}")?; From 1e9258910cdec60e21db10e8efb7651222f76947 Mon Sep 17 00:00:00 2001 From: "Ame :]" <104745335+ameknite@users.noreply.github.com> Date: Sun, 22 Oct 2023 18:01:28 -0500 Subject: [PATCH 54/63] re-export `debug_glam_assert` feature (#10206) # Objective - I want to use the `debug_glam_assert` feature with bevy. ## Solution - Re-export the feature flag --- ## Changelog - Re-export `debug_glam_assert` feature flag from glam. 
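
For context, here is a minimal sketch (not part of this PR) of what enabling the re-exported flag means in practice. It assumes a downstream crate that turns on `debug_glam_assert` for its `bevy` dependency; the `Vec3::normalize` call is only an illustrative case of the kind of invalid input glam's debug assertions catch.

```rust
// Illustrative sketch only: with the `debug_glam_assert` cargo feature enabled
// on `bevy` (which forwards it to glam), glam's validity checks run in debug
// builds, so invalid math inputs panic early instead of silently producing NaNs.
use bevy_math::Vec3;

fn main() {
    let v = Vec3::ZERO;
    // Panics in debug builds when the feature is enabled, because normalizing a
    // zero-length vector is invalid; without the assertions the result is NaN.
    let _unit = v.normalize();
}
```

Release builds are unaffected, which is the difference from the existing `glam_assert` feature.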
--- Cargo.toml | 3 +++ crates/bevy_internal/Cargo.toml | 4 ++++ crates/bevy_math/Cargo.toml | 2 ++ docs/cargo_features.md | 1 + 4 files changed, 10 insertions(+) diff --git a/Cargo.toml b/Cargo.toml index 141cee12392a0..8dd252ffb8cd8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -232,6 +232,9 @@ accesskit_unix = ["bevy_internal/accesskit_unix"] # Enable assertions to check the validity of parameters passed to glam glam_assert = ["bevy_internal/glam_assert"] +# Enable assertions in debug builds to check the validity of parameters passed to glam +debug_glam_assert = ["bevy_internal/debug_glam_assert"] + # Include a default font, containing only ASCII characters, at the cost of a 20kB binary size increase default_font = ["bevy_internal/default_font"] diff --git a/crates/bevy_internal/Cargo.toml b/crates/bevy_internal/Cargo.toml index a7019d5461ea9..5dac70366d10c 100644 --- a/crates/bevy_internal/Cargo.toml +++ b/crates/bevy_internal/Cargo.toml @@ -97,9 +97,13 @@ accesskit_unix = ["bevy_winit/accesskit_unix"] bevy_text = ["dep:bevy_text", "bevy_ui?/bevy_text"] bevy_render = ["dep:bevy_render", "bevy_scene?/bevy_render"] + # Enable assertions to check the validity of parameters passed to glam glam_assert = ["bevy_math/glam_assert"] +# Enable assertions in debug builds to check the validity of parameters passed to glam +debug_glam_assert = ["bevy_math/debug_glam_assert"] + default_font = ["bevy_text?/default_font"] # Enables the built-in asset processor for processed assets. diff --git a/crates/bevy_math/Cargo.toml b/crates/bevy_math/Cargo.toml index f627ec9e6dfbc..df9389effee80 100644 --- a/crates/bevy_math/Cargo.toml +++ b/crates/bevy_math/Cargo.toml @@ -18,3 +18,5 @@ serialize = ["dep:serde", "glam/serde"] mint = ["glam/mint"] # Enable assertions to check the validity of parameters passed to glam glam_assert = ["glam/glam-assert"] +# Enable assertions in debug builds to check the validity of parameters passed to glam +debug_glam_assert = ["glam/debug-glam-assert"] diff --git a/docs/cargo_features.md b/docs/cargo_features.md index 4882b3801a060..47ac836eb5b7f 100644 --- a/docs/cargo_features.md +++ b/docs/cargo_features.md @@ -50,6 +50,7 @@ The default feature set enables most of the expected features of a game engine, |bevy_dynamic_plugin|Plugin for dynamic loading (using [libloading](https://crates.io/crates/libloading))| |bmp|BMP image format support| |dds|DDS compressed texture support| +|debug_glam_assert|Enable assertions in debug builds to check the validity of parameters passed to glam| |detailed_trace|Enable detailed trace event logging. These trace events are expensive even when off, thus they require compile time opt-in| |dynamic_linking|Force dynamic linking, which improves iterative compile times| |embedded_watcher|Enables watching in memory asset providers for Bevy Asset hot-reloading| From e59085a67f255647647820f1c235f97eba67a0c1 Mon Sep 17 00:00:00 2001 From: Marco Buono Date: Mon, 23 Oct 2023 00:26:20 -0300 Subject: [PATCH 55/63] =?UTF-8?q?Use=20=E2=80=9Cspecular=20occlusion?= =?UTF-8?q?=E2=80=9D=20term=20to=20consistently=20extinguish=20fresnel=20o?= =?UTF-8?q?n=20Ambient=20and=20Environment=20Map=20lights=20(#10182)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # Objective Even at `reflectance == 0.0`, our ambient and environment map light implementations still produce fresnel/specular highlights. 
Such a low `reflectance` value lies outside of the physically possible range and is already used by our directional, point and spot light implementations (via the `fresnel()` function) to enable artistic control, effectively disabling the fresnel "look" for non-physically realistic materials. Since ambient and environment lights use a different formulation, they were not honoring this same principle. This PR aims to bring consistency to all light types, offering the same fresnel extinguishing control to ambient and environment lights. Thanks to `@nathanf` for [pointing out](https://discord.com/channels/691052431525675048/743663924229963868/1164083373514440744) the [Filament docs section about this](https://google.github.io/filament/Filament.md.html#lighting/occlusion/specularocclusion). ## Solution - We use [the same formulation](https://github.com/bevyengine/bevy/blob/ffc572728fb7874996a13c31a82e86ef98515995/crates/bevy_pbr/src/render/pbr_lighting.wgsl#L99) already used by the `fresnel()` function in `bevy_pbr::lighting` to modulate the F90, to modulate the specular component of Ambient and Environment Map lights. ## Comparison ⚠️ **Modified version of the PBR example for demo purposes, that shows reflectance (_NOT_ part of this PR)** ⚠️ Also, keep in mind this is a very subtle difference (look for the fresnel highlights on the lower left spheres, you might need to zoom in. ### Before Screenshot 2023-10-18 at 23 02 25 ### After Screenshot 2023-10-18 at 23 01 43 --- ## Changelog - Ambient and Environment Map lights will now honor values of `reflectance` that are below the physically possible range (⪅ 0.35) by extinguishing their fresnel highlights. (Just like point, directional and spot lights already did.) This allows for more consistent artistic control and for non-physically realistic looks with all light types. ## Migration Guide - If Fresnel highlights from Ambient and Environment Map lights are no longer visible in your materials, make sure you're using a higher, physically plausible value of `reflectance` (⪆ 0.35). --- crates/bevy_pbr/src/environment_map/environment_map.wgsl | 7 ++++++- crates/bevy_pbr/src/render/pbr_ambient.wgsl | 7 ++++++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/crates/bevy_pbr/src/environment_map/environment_map.wgsl b/crates/bevy_pbr/src/environment_map/environment_map.wgsl index 2288d7d07c8bb..f188a578e478f 100644 --- a/crates/bevy_pbr/src/environment_map/environment_map.wgsl +++ b/crates/bevy_pbr/src/environment_map/environment_map.wgsl @@ -25,12 +25,17 @@ fn environment_map_light( let irradiance = textureSample(bindings::environment_map_diffuse, bindings::environment_map_sampler, vec3(N.xy, -N.z)).rgb; let radiance = textureSampleLevel(bindings::environment_map_specular, bindings::environment_map_sampler, vec3(R.xy, -R.z), radiance_level).rgb; + // No real world material has specular values under 0.02, so we use this range as a + // "pre-baked specular occlusion" that extinguishes the fresnel term, for artistic control. 
+ // See: https://google.github.io/filament/Filament.html#specularocclusion + let specular_occlusion = saturate(dot(F0, vec3(50.0 * 0.33))); + // Multiscattering approximation: https://www.jcgt.org/published/0008/01/03/paper.pdf // Useful reference: https://bruop.github.io/ibl let Fr = max(vec3(1.0 - roughness), F0) - F0; let kS = F0 + Fr * pow(1.0 - NdotV, 5.0); - let FssEss = kS * f_ab.x + f_ab.y; let Ess = f_ab.x + f_ab.y; + let FssEss = kS * Ess * specular_occlusion; let Ems = 1.0 - Ess; let Favg = F0 + (1.0 - F0) / 21.0; let Fms = FssEss * Favg / (1.0 - Ems * Favg); diff --git a/crates/bevy_pbr/src/render/pbr_ambient.wgsl b/crates/bevy_pbr/src/render/pbr_ambient.wgsl index 23d5cf29b235a..7b174da35c9db 100644 --- a/crates/bevy_pbr/src/render/pbr_ambient.wgsl +++ b/crates/bevy_pbr/src/render/pbr_ambient.wgsl @@ -20,5 +20,10 @@ fn ambient_light( let diffuse_ambient = EnvBRDFApprox(diffuse_color, F_AB(1.0, NdotV)); let specular_ambient = EnvBRDFApprox(specular_color, F_AB(perceptual_roughness, NdotV)); - return (diffuse_ambient + specular_ambient) * lights.ambient_color.rgb * occlusion; + // No real world material has specular values under 0.02, so we use this range as a + // "pre-baked specular occlusion" that extinguishes the fresnel term, for artistic control. + // See: https://google.github.io/filament/Filament.html#specularocclusion + let specular_occlusion = saturate(dot(specular_color, vec3(50.0 * 0.33))); + + return (diffuse_ambient + specular_ambient * specular_occlusion) * lights.ambient_color.rgb * occlusion; } From d938275b9c2fda522c01fac84d118cb57f5474cf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois?= Date: Mon, 23 Oct 2023 06:15:04 +0200 Subject: [PATCH 56/63] assets: use blake3 instead of md5 (#10208) # Objective - Replace md5 by another hasher, as suggested in https://github.com/bevyengine/bevy/pull/8624#discussion_r1359291028 - md5 is not secure, and is slow. use something more secure and faster ## Solution - Replace md5 by blake3 Putting this PR in the 0.12 as once it's released, changing the hash algorithm will be a painful breaking change --- crates/bevy_asset/Cargo.toml | 2 +- crates/bevy_asset/src/meta.rs | 20 +++++++++----------- 2 files changed, 10 insertions(+), 12 deletions(-) diff --git a/crates/bevy_asset/Cargo.toml b/crates/bevy_asset/Cargo.toml index 922b622b89d8c..d67340effc47f 100644 --- a/crates/bevy_asset/Cargo.toml +++ b/crates/bevy_asset/Cargo.toml @@ -33,7 +33,7 @@ crossbeam-channel = "0.5" downcast-rs = "1.2" futures-io = "0.3" futures-lite = "1.12" -md5 = "0.7" +blake3 = "1.5" parking_lot = { version = "0.12", features = ["arc_lock", "send_guard"] } ron = "0.8" serde = { version = "1", features = ["derive"] } diff --git a/crates/bevy_asset/src/meta.rs b/crates/bevy_asset/src/meta.rs index e6d65b8ecd535..dbcd7d7feb57d 100644 --- a/crates/bevy_asset/src/meta.rs +++ b/crates/bevy_asset/src/meta.rs @@ -225,15 +225,14 @@ pub(crate) fn loader_settings_meta_transform( }) } -pub type AssetHash = [u8; 16]; +pub type AssetHash = [u8; 32]; /// NOTE: changing the hashing logic here is a _breaking change_ that requires a [`META_FORMAT_VERSION`] bump. 
pub(crate) fn get_asset_hash(meta_bytes: &[u8], asset_bytes: &[u8]) -> AssetHash { - let mut context = md5::Context::new(); - context.consume(meta_bytes); - context.consume(asset_bytes); - let digest = context.compute(); - digest.0 + let mut hasher = blake3::Hasher::new(); + hasher.update(meta_bytes); + hasher.update(asset_bytes); + *hasher.finalize().as_bytes() } /// NOTE: changing the hashing logic here is a _breaking change_ that requires a [`META_FORMAT_VERSION`] bump. @@ -241,11 +240,10 @@ pub(crate) fn get_full_asset_hash( asset_hash: AssetHash, dependency_hashes: impl Iterator, ) -> AssetHash { - let mut context = md5::Context::new(); - context.consume(asset_hash); + let mut hasher = blake3::Hasher::new(); + hasher.update(&asset_hash); for hash in dependency_hashes { - context.consume(hash); + hasher.update(&hash); } - let digest = context.compute(); - digest.0 + *hasher.finalize().as_bytes() } From b28525b772ef076fb7a93ac9e0fd4a1acd58ea82 Mon Sep 17 00:00:00 2001 From: Al M Date: Mon, 23 Oct 2023 05:24:00 -0700 Subject: [PATCH 57/63] Remove unused import warning when default_font feature is disabled (#10230) # Objective - Fixes: ``` warning: unused import: `Handle` --> crates/bevy_text/src/lib.rs:31:28 | 31 | use bevy_asset::{AssetApp, Handle}; | ^^^^^^ | = note: `#[warn(unused_imports)]` on by default ``` ## Solution - Moved import to match feature. --- crates/bevy_text/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/bevy_text/src/lib.rs b/crates/bevy_text/src/lib.rs index f5498d26ef1fb..024dc663f7cd8 100644 --- a/crates/bevy_text/src/lib.rs +++ b/crates/bevy_text/src/lib.rs @@ -26,9 +26,9 @@ pub mod prelude { } use bevy_app::prelude::*; +use bevy_asset::AssetApp; #[cfg(feature = "default_font")] -use bevy_asset::load_internal_binary_asset; -use bevy_asset::{AssetApp, Handle}; +use bevy_asset::{load_internal_binary_asset, Handle}; use bevy_ecs::prelude::*; use bevy_render::{camera::CameraUpdateSystem, ExtractSchedule, RenderApp}; use bevy_sprite::SpriteSystem; From 8fb5c99347964da55dc0e54e9e471132a7d9d3a3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois?= Date: Mon, 23 Oct 2023 14:25:02 +0200 Subject: [PATCH 58/63] fix run-once runners (#10195) # Objective - After #9826, there are issues on "run once runners" - example `without_winit` crashes: ``` 2023-10-19T22:06:01.810019Z INFO bevy_render::renderer: AdapterInfo { name: "llvmpipe (LLVM 15.0.7, 256 bits)", vendor: 65541, device: 0, device_type: Cpu, driver: "llvmpipe", driver_info: "Mesa 23.2.1 - kisak-mesa PPA (LLVM 15.0.7)", backend: Vulkan } 2023-10-19T22:06:02.860331Z WARN bevy_audio::audio_output: No audio device found. 2023-10-19T22:06:03.215154Z INFO bevy_diagnostic::system_information_diagnostics_plugin::internal: SystemInfo { os: "Linux 22.04 Ubuntu", kernel: "6.2.0-1014-azure", cpu: "Intel(R) Xeon(R) CPU E5-2673 v3 @ 2.40GHz", core_count: "2", memory: "6.8 GiB" } thread 'main' panicked at crates/bevy_render/src/pipelined_rendering.rs:91:14: Unable to get RenderApp. 
Another plugin may have removed the RenderApp before PipelinedRenderingPlugin note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace ``` - example `headless` runs the app twice with the `run_once` schedule ## Solution - Expose a more complex state of an app than just "ready" - Also block adding plugins to an app after it has finished or cleaned up its plugins as that wouldn't work anyway ## Migration Guide * `app.ready()` has been replaced by `app.plugins_state()` which will return more details on the current state of plugins in the app --- crates/bevy_app/src/app.rs | 66 ++++++++++++++++++++------ crates/bevy_app/src/schedule_runner.rs | 11 +++-- crates/bevy_winit/src/lib.rs | 11 ++--- 3 files changed, 63 insertions(+), 25 deletions(-) diff --git a/crates/bevy_app/src/app.rs b/crates/bevy_app/src/app.rs index db43d534ab4ea..3b206ce2deaea 100644 --- a/crates/bevy_app/src/app.rs +++ b/crates/bevy_app/src/app.rs @@ -76,6 +76,7 @@ pub struct App { plugin_name_added: HashSet, /// A private counter to prevent incorrect calls to `App::run()` from `Plugin::build()` building_plugin_depth: usize, + plugins_state: PluginsState, } impl Debug for App { @@ -194,6 +195,19 @@ impl Default for App { } } +/// Plugins state in the application +#[derive(PartialEq, Eq, Debug, Clone, Copy)] +pub enum PluginsState { + /// Plugins are being added. + Adding, + /// All plugins already added are ready. + Ready, + /// Finish has been executed for all plugins added. + Finished, + /// Cleanup has been executed for all plugins added. + Cleaned, +} + // Dummy plugin used to temporary hold the place in the plugin registry struct PlaceholderPlugin; impl Plugin for PlaceholderPlugin { @@ -221,6 +235,7 @@ impl App { plugin_name_added: Default::default(), main_schedule_label: Box::new(Main), building_plugin_depth: 0, + plugins_state: PluginsState::Adding, } } @@ -288,7 +303,7 @@ impl App { panic!("App::run() was called from within Plugin::build(), which is not allowed."); } - if app.ready() { + if app.plugins_state() == PluginsState::Ready { // If we're already ready, we finish up now and advance one frame. // This prevents black frames during the launch transition on iOS. app.finish(); @@ -300,20 +315,25 @@ impl App { (runner)(app); } - /// Check that [`Plugin::ready`] of all plugins returns true. This is usually called by the + /// Check the state of all plugins already added to this app. This is usually called by the /// event loop, but can be useful for situations where you want to use [`App::update`] - pub fn ready(&self) -> bool { - for plugin in &self.plugin_registry { - if !plugin.ready(self) { - return false; + #[inline] + pub fn plugins_state(&self) -> PluginsState { + match self.plugins_state { + PluginsState::Adding => { + for plugin in &self.plugin_registry { + if !plugin.ready(self) { + return PluginsState::Adding; + } + } + PluginsState::Ready } + state => state, } - true } /// Run [`Plugin::finish`] for each plugin. This is usually called by the event loop once all - /// plugins are [`App::ready`], but can be useful for situations where you want to use - /// [`App::update`]. + /// plugins are ready, but can be useful for situations where you want to use [`App::update`]. pub fn finish(&mut self) { // temporarily remove the plugin registry to run each plugin's setup function on app. 
let plugin_registry = std::mem::take(&mut self.plugin_registry); @@ -321,6 +341,7 @@ impl App { plugin.finish(self); } self.plugin_registry = plugin_registry; + self.plugins_state = PluginsState::Finished; } /// Run [`Plugin::cleanup`] for each plugin. This is usually called by the event loop after @@ -332,6 +353,7 @@ impl App { plugin.cleanup(self); } self.plugin_registry = plugin_registry; + self.plugins_state = PluginsState::Cleaned; } /// Adds [`State`] and [`NextState`] resources, [`OnEnter`] and [`OnExit`] schedules @@ -696,6 +718,14 @@ impl App { /// [`PluginGroup`]:super::PluginGroup #[track_caller] pub fn add_plugins(&mut self, plugins: impl Plugins) -> &mut Self { + if matches!( + self.plugins_state(), + PluginsState::Cleaned | PluginsState::Finished + ) { + panic!( + "Plugins cannot be added after App::cleanup() or App::finish() has been called." + ); + } plugins.add_to_app(self); self } @@ -947,14 +977,20 @@ impl App { } fn run_once(mut app: App) { - while !app.ready() { - #[cfg(not(target_arch = "wasm32"))] - bevy_tasks::tick_global_task_pools_on_main_thread(); + let plugins_state = app.plugins_state(); + if plugins_state != PluginsState::Cleaned { + while app.plugins_state() == PluginsState::Adding { + #[cfg(not(target_arch = "wasm32"))] + bevy_tasks::tick_global_task_pools_on_main_thread(); + } + app.finish(); + app.cleanup(); } - app.finish(); - app.cleanup(); - app.update(); + // if plugins where cleaned before the runner start, an update already ran + if plugins_state != PluginsState::Cleaned { + app.update(); + } } /// An event that indicates the [`App`] should exit. This will fully exit the app process at the diff --git a/crates/bevy_app/src/schedule_runner.rs b/crates/bevy_app/src/schedule_runner.rs index e14687c92ff59..18b2f0b61fb55 100644 --- a/crates/bevy_app/src/schedule_runner.rs +++ b/crates/bevy_app/src/schedule_runner.rs @@ -1,6 +1,7 @@ use crate::{ app::{App, AppExit}, plugin::Plugin, + PluginsState, }; use bevy_ecs::event::{Events, ManualEventReader}; use bevy_utils::{Duration, Instant}; @@ -71,8 +72,9 @@ impl Plugin for ScheduleRunnerPlugin { fn build(&self, app: &mut App) { let run_mode = self.run_mode; app.set_runner(move |mut app: App| { - if !app.ready() { - while !app.ready() { + let plugins_state = app.plugins_state(); + if plugins_state != PluginsState::Cleaned { + while app.plugins_state() == PluginsState::Adding { #[cfg(not(target_arch = "wasm32"))] bevy_tasks::tick_global_task_pools_on_main_thread(); } @@ -83,7 +85,10 @@ impl Plugin for ScheduleRunnerPlugin { let mut app_exit_event_reader = ManualEventReader::::default(); match run_mode { RunMode::Once => { - app.update(); + // if plugins where cleaned before the runner start, an update already ran + if plugins_state != PluginsState::Cleaned { + app.update(); + } } RunMode::Loop { wait } => { let mut tick = move |app: &mut App, diff --git a/crates/bevy_winit/src/lib.rs b/crates/bevy_winit/src/lib.rs index 38f7d9b6e6cf1..ad77d95269877 100644 --- a/crates/bevy_winit/src/lib.rs +++ b/crates/bevy_winit/src/lib.rs @@ -20,7 +20,7 @@ use system::{changed_windows, create_windows, despawn_windows, CachedWindow}; pub use winit_config::*; pub use winit_windows::*; -use bevy_app::{App, AppExit, Last, Plugin}; +use bevy_app::{App, AppExit, Last, Plugin, PluginsState}; use bevy_ecs::event::{Events, ManualEventReader}; use bevy_ecs::prelude::*; use bevy_ecs::system::{SystemParam, SystemState}; @@ -378,8 +378,6 @@ pub fn winit_runner(mut app: App) { ResMut, )> = SystemState::from_world(&mut app.world); - let mut 
finished_and_setup_done = app.ready(); - // setup up the event loop let event_handler = move |event: Event<()>, event_loop: &EventLoopWindowTarget<()>, @@ -387,14 +385,13 @@ pub fn winit_runner(mut app: App) { #[cfg(feature = "trace")] let _span = bevy_utils::tracing::info_span!("winit event_handler").entered(); - if !finished_and_setup_done { - if !app.ready() { + if app.plugins_state() != PluginsState::Cleaned { + if app.plugins_state() != PluginsState::Ready { #[cfg(not(target_arch = "wasm32"))] tick_global_task_pools_on_main_thread(); } else { app.finish(); app.cleanup(); - finished_and_setup_done = true; } if let Some(app_exit_events) = app.world.get_resource::>() { @@ -775,7 +772,7 @@ pub fn winit_runner(mut app: App) { } }; - if finished_and_setup_done && should_update { + if app.plugins_state() == PluginsState::Cleaned && should_update { // reset these on each update runner_state.wait_elapsed = false; runner_state.window_event_received = false; From 51c70bc98cd9c71d8e55a4656f22d0229a0ef06e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafa=C5=82=20Harabie=C5=84?= Date: Mon, 23 Oct 2023 14:45:18 +0200 Subject: [PATCH 59/63] Fix fog color being inaccurate (#10226) # Objective Fog color was passed to shaders without conversion from sRGB to linear color space. Because shaders expect colors in linear space this resulted in wrong color being used. This is most noticeable in open scenes with dark fog color and clear color set to the same color. In such case background/clear color (which is properly processed) is going to be darker than very far objects. Example: ![image](https://github.com/bevyengine/bevy/assets/160391/89b70d97-b2d0-4bc5-80f4-c9e8b8801c4c) [bevy-fog-color-bug.zip](https://github.com/bevyengine/bevy/files/13063718/bevy-fog-color-bug.zip) ## Solution Add missing conversion of fog color to linear color space. --- ## Changelog * Fixed conversion of fog color ## Migration Guide - Colors in `FogSettings` struct (`color` and `directional_light_color`) are now sent to the GPU in linear space. If you were using `Color::rgb()`/`Color::rgba()` and would like to retain the previous colors, you can quickly fix it by switching to `Color::rgb_linear()`/`Color::rgba_linear()`. 
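For clarity, a minimal sketch of the migration path described above (this snippet is not part of the patch; the camera setup is illustrative and only the `Color` constructor is the relevant change):

```rust
use bevy::pbr::{FogFalloff, FogSettings};
use bevy::prelude::*;

fn setup_camera_fog(mut commands: Commands) {
    commands.spawn((
        Camera3dBundle::default(),
        FogSettings {
            // Before this fix the sRGB components were uploaded unconverted, so the
            // shader effectively treated them as linear values.
            // color: Color::rgba(0.05, 0.05, 0.05, 1.0),

            // The color is now converted to linear space before upload; to keep the
            // exact previous on-screen result, construct it directly in linear space:
            color: Color::rgba_linear(0.05, 0.05, 0.05, 1.0),
            falloff: FogFalloff::Linear {
                start: 5.0,
                end: 20.0,
            },
            ..default()
        },
    ));
}
```

Alternatively, the color values themselves can be re-tuned for the corrected pipeline, which is what the updated examples in the diff below do.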
--- crates/bevy_pbr/src/render/fog.rs | 28 ++++++++++++++++++++-------- examples/3d/atmospheric_fog.rs | 4 ++-- examples/3d/fog.rs | 2 +- 3 files changed, 23 insertions(+), 11 deletions(-) diff --git a/crates/bevy_pbr/src/render/fog.rs b/crates/bevy_pbr/src/render/fog.rs index 4df01418f1d30..1738b261b578e 100644 --- a/crates/bevy_pbr/src/render/fog.rs +++ b/crates/bevy_pbr/src/render/fog.rs @@ -65,24 +65,33 @@ pub fn prepare_fog( match &fog.falloff { FogFalloff::Linear { start, end } => GpuFog { mode: GPU_FOG_MODE_LINEAR, - base_color: fog.color.into(), - directional_light_color: fog.directional_light_color.into(), + base_color: fog.color.as_linear_rgba_f32().into(), + directional_light_color: fog + .directional_light_color + .as_linear_rgba_f32() + .into(), directional_light_exponent: fog.directional_light_exponent, be: Vec3::new(*start, *end, 0.0), ..Default::default() }, FogFalloff::Exponential { density } => GpuFog { mode: GPU_FOG_MODE_EXPONENTIAL, - base_color: fog.color.into(), - directional_light_color: fog.directional_light_color.into(), + base_color: fog.color.as_linear_rgba_f32().into(), + directional_light_color: fog + .directional_light_color + .as_linear_rgba_f32() + .into(), directional_light_exponent: fog.directional_light_exponent, be: Vec3::new(*density, 0.0, 0.0), ..Default::default() }, FogFalloff::ExponentialSquared { density } => GpuFog { mode: GPU_FOG_MODE_EXPONENTIAL_SQUARED, - base_color: fog.color.into(), - directional_light_color: fog.directional_light_color.into(), + base_color: fog.color.as_linear_rgba_f32().into(), + directional_light_color: fog + .directional_light_color + .as_linear_rgba_f32() + .into(), directional_light_exponent: fog.directional_light_exponent, be: Vec3::new(*density, 0.0, 0.0), ..Default::default() @@ -92,8 +101,11 @@ pub fn prepare_fog( inscattering, } => GpuFog { mode: GPU_FOG_MODE_ATMOSPHERIC, - base_color: fog.color.into(), - directional_light_color: fog.directional_light_color.into(), + base_color: fog.color.as_linear_rgba_f32().into(), + directional_light_color: fog + .directional_light_color + .as_linear_rgba_f32() + .into(), directional_light_exponent: fog.directional_light_exponent, be: *extinction, bi: *inscattering, diff --git a/examples/3d/atmospheric_fog.rs b/examples/3d/atmospheric_fog.rs index a8cf9199f9f93..b29fc9adabb9b 100644 --- a/examples/3d/atmospheric_fog.rs +++ b/examples/3d/atmospheric_fog.rs @@ -31,8 +31,8 @@ fn setup_camera_fog(mut commands: Commands) { ..default() }, FogSettings { - color: Color::rgba(0.1, 0.2, 0.4, 1.0), - directional_light_color: Color::rgba(1.0, 0.95, 0.75, 0.5), + color: Color::rgba(0.35, 0.48, 0.66, 1.0), + directional_light_color: Color::rgba(1.0, 0.95, 0.85, 0.5), directional_light_exponent: 30.0, falloff: FogFalloff::from_visibility_colors( 15.0, // distance in world units up to which objects retain visibility (>= 5% contrast) diff --git a/examples/3d/fog.rs b/examples/3d/fog.rs index 35d381ce0fced..f2e3c58ef1ea3 100644 --- a/examples/3d/fog.rs +++ b/examples/3d/fog.rs @@ -34,7 +34,7 @@ fn setup_camera_fog(mut commands: Commands) { commands.spawn(( Camera3dBundle::default(), FogSettings { - color: Color::rgba(0.05, 0.05, 0.05, 1.0), + color: Color::rgba(0.25, 0.25, 0.25, 1.0), falloff: FogFalloff::Linear { start: 5.0, end: 20.0, From 7d504b89c369e04eeb21e0483fd2ebd1915e1d35 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois?= Date: Mon, 23 Oct 2023 22:47:55 +0200 Subject: [PATCH 60/63] Application lifetime events (suspend audio on Android) (#10158) MIME-Version: 1.0 Content-Type: 
text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # Objective - Handle pausing audio when Android app is suspended ## Solution - This is the start of application lifetime events. They are mostly useful on mobile - Next version of winit should add a few more - When application is suspended, send an event to notify the application, and run the schedule one last time before actually suspending the app - Audio is now suspended too 🎉 https://github.com/bevyengine/bevy/assets/8672791/d74e2e09-ee29-4f40-adf2-36a0c064f94e --------- Co-authored-by: Marco Buono <418473+coreh@users.noreply.github.com> --- crates/bevy_window/src/event.rs | 19 +++++++++ crates/bevy_window/src/lib.rs | 6 ++- crates/bevy_winit/src/lib.rs | 74 ++++++++++++++++++++++++--------- examples/mobile/src/lib.rs | 23 +++++++++- 4 files changed, 99 insertions(+), 23 deletions(-) diff --git a/crates/bevy_window/src/event.rs b/crates/bevy_window/src/event.rs index ffc0939cbef94..d92516d731e42 100644 --- a/crates/bevy_window/src/event.rs +++ b/crates/bevy_window/src/event.rs @@ -330,3 +330,22 @@ pub struct WindowThemeChanged { /// The new system theme. pub theme: WindowTheme, } + +/// Application lifetime events +#[derive(Event, Debug, Clone, Copy, PartialEq, Eq, Reflect)] +#[reflect(Debug, PartialEq)] +#[cfg_attr( + feature = "serialize", + derive(serde::Serialize, serde::Deserialize), + reflect(Serialize, Deserialize) +)] +pub enum ApplicationLifetime { + /// The application just started. + Started, + /// The application was suspended. + /// + /// On Android, applications have one frame to react to this event before being paused in the background. + Suspended, + /// The application was resumed. + Resumed, +} diff --git a/crates/bevy_window/src/lib.rs b/crates/bevy_window/src/lib.rs index bb367918db420..f1e8a304c52a5 100644 --- a/crates/bevy_window/src/lib.rs +++ b/crates/bevy_window/src/lib.rs @@ -98,7 +98,8 @@ impl Plugin for WindowPlugin { .add_event::() .add_event::() .add_event::() - .add_event::(); + .add_event::() + .add_event::(); if let Some(primary_window) = &self.primary_window { let initial_focus = app @@ -141,7 +142,8 @@ impl Plugin for WindowPlugin { .register_type::() .register_type::() .register_type::() - .register_type::(); + .register_type::() + .register_type::(); // Register window descriptor and related types app.register_type::() diff --git a/crates/bevy_winit/src/lib.rs b/crates/bevy_winit/src/lib.rs index ad77d95269877..955f954a6f45f 100644 --- a/crates/bevy_winit/src/lib.rs +++ b/crates/bevy_winit/src/lib.rs @@ -38,10 +38,10 @@ use bevy_utils::{ Duration, Instant, }; use bevy_window::{ - exit_on_all_closed, CursorEntered, CursorLeft, CursorMoved, FileDragAndDrop, Ime, - ReceivedCharacter, RequestRedraw, Window, WindowBackendScaleFactorChanged, - WindowCloseRequested, WindowCreated, WindowDestroyed, WindowFocused, WindowMoved, - WindowResized, WindowScaleFactorChanged, WindowThemeChanged, + exit_on_all_closed, ApplicationLifetime, CursorEntered, CursorLeft, CursorMoved, + FileDragAndDrop, Ime, ReceivedCharacter, RequestRedraw, Window, + WindowBackendScaleFactorChanged, WindowCloseRequested, WindowCreated, WindowDestroyed, + WindowFocused, WindowMoved, WindowResized, WindowScaleFactorChanged, WindowThemeChanged, }; #[cfg(target_os = "android")] use bevy_window::{PrimaryWindow, RawHandleWrapper}; @@ -279,6 +279,7 @@ struct WindowAndInputEventWriters<'w> { window_moved: EventWriter<'w, WindowMoved>, window_theme_changed: EventWriter<'w, WindowThemeChanged>, window_destroyed: EventWriter<'w, 
WindowDestroyed>, + lifetime: EventWriter<'w, ApplicationLifetime>, keyboard_input: EventWriter<'w, KeyboardInput>, character_input: EventWriter<'w, ReceivedCharacter>, mouse_button_input: EventWriter<'w, MouseButtonInput>, @@ -298,8 +299,8 @@ struct WindowAndInputEventWriters<'w> { /// Persistent state that is used to run the [`App`] according to the current /// [`UpdateMode`]. struct WinitAppRunnerState { - /// Is `true` if the app is running and not suspended. - is_active: bool, + /// Current active state of the app. + active: ActiveState, /// Is `true` if a new [`WindowEvent`] has been received since the last update. window_event_received: bool, /// Is `true` if the app has requested a redraw since the last update. @@ -312,10 +313,28 @@ struct WinitAppRunnerState { scheduled_update: Option, } +#[derive(PartialEq, Eq)] +enum ActiveState { + NotYetStarted, + Active, + Suspended, + WillSuspend, +} + +impl ActiveState { + #[inline] + fn should_run(&self) -> bool { + match self { + ActiveState::NotYetStarted | ActiveState::Suspended => false, + ActiveState::Active | ActiveState::WillSuspend => true, + } + } +} + impl Default for WinitAppRunnerState { fn default() -> Self { Self { - is_active: false, + active: ActiveState::NotYetStarted, window_event_received: false, redraw_requested: false, wait_elapsed: false, @@ -700,19 +719,23 @@ pub fn winit_runner(mut app: App) { }); } event::Event::Suspended => { - runner_state.is_active = false; - #[cfg(target_os = "android")] - { - // Remove the `RawHandleWrapper` from the primary window. - // This will trigger the surface destruction. - let mut query = app.world.query_filtered::>(); - let entity = query.single(&app.world); - app.world.entity_mut(entity).remove::(); - *control_flow = ControlFlow::Wait; - } + let (mut event_writers, _, _) = event_writer_system_state.get_mut(&mut app.world); + event_writers.lifetime.send(ApplicationLifetime::Suspended); + // Mark the state as `WillSuspend`. This will let the schedule run one last time + // before actually suspending to let the application react + runner_state.active = ActiveState::WillSuspend; } event::Event::Resumed => { - runner_state.is_active = true; + let (mut event_writers, _, _) = event_writer_system_state.get_mut(&mut app.world); + match runner_state.active { + ActiveState::NotYetStarted => { + event_writers.lifetime.send(ApplicationLifetime::Started); + } + _ => { + event_writers.lifetime.send(ApplicationLifetime::Resumed); + } + } + runner_state.active = ActiveState::Active; #[cfg(target_os = "android")] { // Get windows that are cached but without raw handles. Those window were already created, but got their @@ -754,7 +777,20 @@ pub fn winit_runner(mut app: App) { } } event::Event::MainEventsCleared => { - if runner_state.is_active { + if runner_state.active.should_run() { + if runner_state.active == ActiveState::WillSuspend { + runner_state.active = ActiveState::Suspended; + #[cfg(target_os = "android")] + { + // Remove the `RawHandleWrapper` from the primary window. + // This will trigger the surface destruction. 
+ let mut query = + app.world.query_filtered::>(); + let entity = query.single(&app.world); + app.world.entity_mut(entity).remove::(); + *control_flow = ControlFlow::Wait; + } + } let (config, windows) = focused_windows_state.get(&app.world); let focused = windows.iter().any(|window| window.focused); let should_update = match config.update_mode(focused) { diff --git a/examples/mobile/src/lib.rs b/examples/mobile/src/lib.rs index 5dcac8aceb677..a4715c43689b0 100644 --- a/examples/mobile/src/lib.rs +++ b/examples/mobile/src/lib.rs @@ -2,7 +2,11 @@ // type aliases tends to obfuscate code while offering no improvement in code cleanliness. #![allow(clippy::type_complexity)] -use bevy::{input::touch::TouchPhase, prelude::*, window::WindowMode}; +use bevy::{ + input::touch::TouchPhase, + prelude::*, + window::{ApplicationLifetime, WindowMode}, +}; // the `bevy_main` proc_macro generates the required boilerplate for iOS and Android #[bevy_main] @@ -17,7 +21,7 @@ fn main() { ..default() })) .add_systems(Startup, (setup_scene, setup_music)) - .add_systems(Update, (touch_camera, button_handler)); + .add_systems(Update, (touch_camera, button_handler, handle_lifetime)); // MSAA makes some Android devices panic, this is under investigation // https://github.com/bevyengine/bevy/issues/8229 @@ -161,3 +165,18 @@ fn setup_music(asset_server: Res, mut commands: Commands) { settings: PlaybackSettings::LOOP, }); } + +// Pause audio when app goes into background and resume when it returns. +// This is handled by the OS on iOS, but not on Android. +fn handle_lifetime( + mut lifetime_events: EventReader, + music_controller: Query<&AudioSink>, +) { + for event in lifetime_events.read() { + match event { + ApplicationLifetime::Suspended => music_controller.single().pause(), + ApplicationLifetime::Resumed => music_controller.single().play(), + ApplicationLifetime::Started => (), + } + } +} From faa1b57de5635105699b1a6c659cbed8e0d7d80e Mon Sep 17 00:00:00 2001 From: Pixelstorm Date: Mon, 23 Oct 2023 21:48:48 +0100 Subject: [PATCH 61/63] Global TaskPool API improvements (#10008) # Objective Reduce code duplication and improve APIs of Bevy's [global taskpools](https://github.com/bevyengine/bevy/blob/main/crates/bevy_tasks/src/usages.rs). ## Solution - As all three of the global taskpools have identical implementations and only differ in their identifiers, this PR moves the implementation into a macro to reduce code duplication. - The `init` method is renamed to `get_or_init` to more accurately reflect what it really does. - Add a new `try_get` method that just returns `None` when the pool is uninitialized, to complement the other getter methods. - Minor documentation improvements to accompany the above changes. --- ## Changelog - Added a new `try_get` method to the global TaskPools - The global TaskPools' `init` method has been renamed to `get_or_init` for clarity - Documentation improvements ## Migration Guide - Uses of `ComputeTaskPool::init`, `AsyncComputeTaskPool::init` and `IoTaskPool::init` should be changed to `::get_or_init`. 
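As a quick illustration of the rename and the new accessor (this snippet is not part of the patch; the function name and log messages are made up for the example):

```rust
use bevy_tasks::{ComputeTaskPool, IoTaskPool, TaskPool};

fn report_task_pools() {
    // Previously `ComputeTaskPool::init(TaskPool::default)`; the new name makes it
    // clear that an existing pool is returned unchanged if one was already created.
    let compute = ComputeTaskPool::get_or_init(TaskPool::default);
    println!("compute task pool threads: {}", compute.thread_num());

    // `try_get` returns `None` instead of panicking when the pool is uninitialized.
    match IoTaskPool::try_get() {
        Some(io) => println!("io task pool threads: {}", io.thread_num()),
        None => println!("IoTaskPool has not been initialized yet"),
    }
}
```

The same pattern applies to `AsyncComputeTaskPool`.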
--- .../bevy_ecs/iteration/heavy_compute.rs | 2 +- crates/bevy_core/src/task_pool_options.rs | 6 +- crates/bevy_ecs/src/lib.rs | 4 +- .../src/schedule/executor/multi_threaded.rs | 2 +- crates/bevy_ecs/src/schedule/mod.rs | 2 +- crates/bevy_tasks/src/usages.rs | 156 +++++++----------- crates/bevy_transform/src/systems.rs | 10 +- 7 files changed, 76 insertions(+), 106 deletions(-) diff --git a/benches/benches/bevy_ecs/iteration/heavy_compute.rs b/benches/benches/bevy_ecs/iteration/heavy_compute.rs index c1b8598b97e97..9a53092903f48 100644 --- a/benches/benches/bevy_ecs/iteration/heavy_compute.rs +++ b/benches/benches/bevy_ecs/iteration/heavy_compute.rs @@ -20,7 +20,7 @@ pub fn heavy_compute(c: &mut Criterion) { group.warm_up_time(std::time::Duration::from_millis(500)); group.measurement_time(std::time::Duration::from_secs(4)); group.bench_function("base", |b| { - ComputeTaskPool::init(TaskPool::default); + ComputeTaskPool::get_or_init(TaskPool::default); let mut world = World::default(); diff --git a/crates/bevy_core/src/task_pool_options.rs b/crates/bevy_core/src/task_pool_options.rs index 3eaedb8c8f972..a6eb39df6c723 100644 --- a/crates/bevy_core/src/task_pool_options.rs +++ b/crates/bevy_core/src/task_pool_options.rs @@ -107,7 +107,7 @@ impl TaskPoolOptions { trace!("IO Threads: {}", io_threads); remaining_threads = remaining_threads.saturating_sub(io_threads); - IoTaskPool::init(|| { + IoTaskPool::get_or_init(|| { TaskPoolBuilder::default() .num_threads(io_threads) .thread_name("IO Task Pool".to_string()) @@ -124,7 +124,7 @@ impl TaskPoolOptions { trace!("Async Compute Threads: {}", async_compute_threads); remaining_threads = remaining_threads.saturating_sub(async_compute_threads); - AsyncComputeTaskPool::init(|| { + AsyncComputeTaskPool::get_or_init(|| { TaskPoolBuilder::default() .num_threads(async_compute_threads) .thread_name("Async Compute Task Pool".to_string()) @@ -141,7 +141,7 @@ impl TaskPoolOptions { trace!("Compute Threads: {}", compute_threads); - ComputeTaskPool::init(|| { + ComputeTaskPool::get_or_init(|| { TaskPoolBuilder::default() .num_threads(compute_threads) .thread_name("Compute Task Pool".to_string()) diff --git a/crates/bevy_ecs/src/lib.rs b/crates/bevy_ecs/src/lib.rs index 06001b584dbdc..f971697beb539 100644 --- a/crates/bevy_ecs/src/lib.rs +++ b/crates/bevy_ecs/src/lib.rs @@ -400,7 +400,7 @@ mod tests { #[test] fn par_for_each_dense() { - ComputeTaskPool::init(TaskPool::default); + ComputeTaskPool::get_or_init(TaskPool::default); let mut world = World::new(); let e1 = world.spawn(A(1)).id(); let e2 = world.spawn(A(2)).id(); @@ -423,7 +423,7 @@ mod tests { #[test] fn par_for_each_sparse() { - ComputeTaskPool::init(TaskPool::default); + ComputeTaskPool::get_or_init(TaskPool::default); let mut world = World::new(); let e1 = world.spawn(SparseStored(1)).id(); let e2 = world.spawn(SparseStored(2)).id(); diff --git a/crates/bevy_ecs/src/schedule/executor/multi_threaded.rs b/crates/bevy_ecs/src/schedule/executor/multi_threaded.rs index dad3fa4459d20..36eb76a94abca 100644 --- a/crates/bevy_ecs/src/schedule/executor/multi_threaded.rs +++ b/crates/bevy_ecs/src/schedule/executor/multi_threaded.rs @@ -195,7 +195,7 @@ impl SystemExecutor for MultiThreadedExecutor { mut conditions, } = SyncUnsafeSchedule::new(schedule); - ComputeTaskPool::init(TaskPool::default).scope_with_executor( + ComputeTaskPool::get_or_init(TaskPool::default).scope_with_executor( false, thread_executor, |scope| { diff --git a/crates/bevy_ecs/src/schedule/mod.rs b/crates/bevy_ecs/src/schedule/mod.rs index 
4ed863944acfb..9d48d03df40e9 100644 --- a/crates/bevy_ecs/src/schedule/mod.rs +++ b/crates/bevy_ecs/src/schedule/mod.rs @@ -104,7 +104,7 @@ mod tests { let mut world = World::default(); let mut schedule = Schedule::default(); - let thread_count = ComputeTaskPool::init(TaskPool::default).thread_num(); + let thread_count = ComputeTaskPool::get_or_init(TaskPool::default).thread_num(); let barrier = Arc::new(Barrier::new(thread_count)); diff --git a/crates/bevy_tasks/src/usages.rs b/crates/bevy_tasks/src/usages.rs index 49b8b5cd2ff72..fda3092b8ebc8 100644 --- a/crates/bevy_tasks/src/usages.rs +++ b/crates/bevy_tasks/src/usages.rs @@ -1,107 +1,77 @@ use super::TaskPool; use std::{ops::Deref, sync::OnceLock}; -static COMPUTE_TASK_POOL: OnceLock = OnceLock::new(); -static ASYNC_COMPUTE_TASK_POOL: OnceLock = OnceLock::new(); -static IO_TASK_POOL: OnceLock = OnceLock::new(); - -/// A newtype for a task pool for CPU-intensive work that must be completed to -/// deliver the next frame -/// -/// See [`TaskPool`] documentation for details on Bevy tasks. -/// [`AsyncComputeTaskPool`] should be preferred if the work does not have to be -/// completed before the next frame. -#[derive(Debug)] -pub struct ComputeTaskPool(TaskPool); - -impl ComputeTaskPool { - /// Initializes the global [`ComputeTaskPool`] instance. - pub fn init(f: impl FnOnce() -> TaskPool) -> &'static Self { - COMPUTE_TASK_POOL.get_or_init(|| Self(f())) - } - - /// Gets the global [`ComputeTaskPool`] instance. - /// - /// # Panics - /// Panics if no pool has been initialized yet. - pub fn get() -> &'static Self { - COMPUTE_TASK_POOL.get().expect( - "A ComputeTaskPool has not been initialized yet. Please call \ - ComputeTaskPool::init beforehand.", - ) - } +macro_rules! taskpool { + ($(#[$attr:meta])* ($static:ident, $type:ident)) => { + static $static: OnceLock<$type> = OnceLock::new(); + + $(#[$attr])* + #[derive(Debug)] + pub struct $type(TaskPool); + + impl $type { + #[doc = concat!(" Gets the global [`", stringify!($type), "`] instance, or initializes it with `f`.")] + pub fn get_or_init(f: impl FnOnce() -> TaskPool) -> &'static Self { + $static.get_or_init(|| Self(f())) + } + + #[doc = concat!(" Attempts to get the global [`", stringify!($type), "`] instance, \ + or returns `None` if it is not initialized.")] + pub fn try_get() -> Option<&'static Self> { + $static.get() + } + + #[doc = concat!(" Gets the global [`", stringify!($type), "`] instance.")] + #[doc = ""] + #[doc = " # Panics"] + #[doc = " Panics if the global instance has not been initialized yet."] + pub fn get() -> &'static Self { + $static.get().expect( + concat!( + "The ", + stringify!($type), + " has not been initialized yet. Please call ", + stringify!($type), + "::get_or_init beforehand." + ) + ) + } + } + + impl Deref for $type { + type Target = TaskPool; + + fn deref(&self) -> &Self::Target { + &self.0 + } + } + }; } -impl Deref for ComputeTaskPool { - type Target = TaskPool; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -/// A newtype for a task pool for CPU-intensive work that may span across multiple frames -/// -/// See [`TaskPool`] documentation for details on Bevy tasks. Use [`ComputeTaskPool`] if -/// the work must be complete before advancing to the next frame. -#[derive(Debug)] -pub struct AsyncComputeTaskPool(TaskPool); - -impl AsyncComputeTaskPool { - /// Initializes the global [`AsyncComputeTaskPool`] instance. 
- pub fn init(f: impl FnOnce() -> TaskPool) -> &'static Self { - ASYNC_COMPUTE_TASK_POOL.get_or_init(|| Self(f())) - } - - /// Gets the global [`AsyncComputeTaskPool`] instance. +taskpool! { + /// A newtype for a task pool for CPU-intensive work that must be completed to + /// deliver the next frame /// - /// # Panics - /// Panics if no pool has been initialized yet. - pub fn get() -> &'static Self { - ASYNC_COMPUTE_TASK_POOL.get().expect( - "A AsyncComputeTaskPool has not been initialized yet. Please call \ - AsyncComputeTaskPool::init beforehand.", - ) - } -} - -impl Deref for AsyncComputeTaskPool { - type Target = TaskPool; - - fn deref(&self) -> &Self::Target { - &self.0 - } + /// See [`TaskPool`] documentation for details on Bevy tasks. + /// [`AsyncComputeTaskPool`] should be preferred if the work does not have to be + /// completed before the next frame. + (COMPUTE_TASK_POOL, ComputeTaskPool) } -/// A newtype for a task pool for IO-intensive work (i.e. tasks that spend very little time in a -/// "woken" state) -#[derive(Debug)] -pub struct IoTaskPool(TaskPool); - -impl IoTaskPool { - /// Initializes the global [`IoTaskPool`] instance. - pub fn init(f: impl FnOnce() -> TaskPool) -> &'static Self { - IO_TASK_POOL.get_or_init(|| Self(f())) - } - - /// Gets the global [`IoTaskPool`] instance. +taskpool! { + /// A newtype for a task pool for CPU-intensive work that may span across multiple frames /// - /// # Panics - /// Panics if no pool has been initialized yet. - pub fn get() -> &'static Self { - IO_TASK_POOL.get().expect( - "A IoTaskPool has not been initialized yet. Please call \ - IoTaskPool::init beforehand.", - ) - } + /// See [`TaskPool`] documentation for details on Bevy tasks. + /// Use [`ComputeTaskPool`] if the work must be complete before advancing to the next frame. + (ASYNC_COMPUTE_TASK_POOL, AsyncComputeTaskPool) } -impl Deref for IoTaskPool { - type Target = TaskPool; - - fn deref(&self) -> &Self::Target { - &self.0 - } +taskpool! { + /// A newtype for a task pool for IO-intensive work (i.e. tasks that spend very little time in a + /// "woken" state) + /// + /// See [`TaskPool`] documentation for details on Bevy tasks. + (IO_TASK_POOL, IoTaskPool) } /// A function used by `bevy_core` to tick the global tasks pools on the main thread. 
diff --git a/crates/bevy_transform/src/systems.rs b/crates/bevy_transform/src/systems.rs index b25784c3889fe..8f6bac916a739 100644 --- a/crates/bevy_transform/src/systems.rs +++ b/crates/bevy_transform/src/systems.rs @@ -193,7 +193,7 @@ mod test { #[test] fn correct_parent_removed() { - ComputeTaskPool::init(TaskPool::default); + ComputeTaskPool::get_or_init(TaskPool::default); let mut world = World::default(); let offset_global_transform = |offset| GlobalTransform::from(Transform::from_xyz(offset, offset, offset)); @@ -248,7 +248,7 @@ mod test { #[test] fn did_propagate() { - ComputeTaskPool::init(TaskPool::default); + ComputeTaskPool::get_or_init(TaskPool::default); let mut world = World::default(); let mut schedule = Schedule::default(); @@ -326,7 +326,7 @@ mod test { #[test] fn correct_children() { - ComputeTaskPool::init(TaskPool::default); + ComputeTaskPool::get_or_init(TaskPool::default); let mut world = World::default(); let mut schedule = Schedule::default(); @@ -404,7 +404,7 @@ mod test { #[test] fn correct_transforms_when_no_children() { let mut app = App::new(); - ComputeTaskPool::init(TaskPool::default); + ComputeTaskPool::get_or_init(TaskPool::default); app.add_systems(Update, (sync_simple_transforms, propagate_transforms)); @@ -446,7 +446,7 @@ mod test { #[test] #[should_panic] fn panic_when_hierarchy_cycle() { - ComputeTaskPool::init(TaskPool::default); + ComputeTaskPool::get_or_init(TaskPool::default); // We cannot directly edit Parent and Children, so we use a temp world to break // the hierarchy's invariants. let mut temp = World::new(); From afe8b5f20da2bfa9629fa8ed1ff24852553e616c Mon Sep 17 00:00:00 2001 From: st0rmbtw <61053971+st0rmbtw@users.noreply.github.com> Date: Mon, 23 Oct 2023 23:49:02 +0300 Subject: [PATCH 62/63] Replace all usages of texture_descritor.size.* with the helper methods (#10227) # Objective A follow-up PR for https://github.com/bevyengine/bevy/pull/10221 ## Changelog Replaced usages of texture_descriptor.size with the helper methods of `Image` through the entire engine codebase --- crates/bevy_pbr/src/render/mesh.rs | 13 +++--- crates/bevy_render/src/camera/camera.rs | 5 +-- .../bevy_render/src/texture/fallback_image.rs | 8 +--- crates/bevy_render/src/texture/image.rs | 21 ++++++---- .../src/texture/image_texture_conversion.rs | 40 +++++++------------ .../src/dynamic_texture_atlas_builder.rs | 6 +-- crates/bevy_sprite/src/mesh2d/mesh.rs | 13 +++--- crates/bevy_sprite/src/render/mod.rs | 11 +++-- .../bevy_sprite/src/texture_atlas_builder.rs | 7 +--- crates/bevy_text/src/font_atlas_set.rs | 2 +- crates/bevy_ui/src/widget/image.rs | 5 +-- examples/3d/skybox.rs | 4 +- 12 files changed, 57 insertions(+), 78 deletions(-) diff --git a/crates/bevy_pbr/src/render/mesh.rs b/crates/bevy_pbr/src/render/mesh.rs index 8f884e6bb980e..a798cc6ac3b71 100644 --- a/crates/bevy_pbr/src/render/mesh.rs +++ b/crates/bevy_pbr/src/render/mesh.rs @@ -17,7 +17,7 @@ use bevy_ecs::{ query::{QueryItem, ROQueryItem}, system::{lifetimeless::*, SystemParamItem, SystemState}, }; -use bevy_math::{Affine3, Vec2, Vec4}; +use bevy_math::{Affine3, Vec4}; use bevy_render::{ batching::{ batch_and_prepare_render_phase, write_batched_instance_buffer, GetBatchData, @@ -374,7 +374,9 @@ impl FromWorld for MeshPipeline { let texture = render_device.create_texture(&image.texture_descriptor); let sampler = match image.sampler_descriptor { ImageSampler::Default => (**default_sampler).clone(), - ImageSampler::Descriptor(descriptor) => render_device.create_sampler(&descriptor), + 
ImageSampler::Descriptor(ref descriptor) => { + render_device.create_sampler(descriptor) + } }; let format_size = image.texture_descriptor.format.pixel_size(); @@ -388,7 +390,7 @@ impl FromWorld for MeshPipeline { &image.data, ImageDataLayout { offset: 0, - bytes_per_row: Some(image.texture_descriptor.size.width * format_size as u32), + bytes_per_row: Some(image.width() * format_size as u32), rows_per_image: None, }, image.texture_descriptor.size, @@ -400,10 +402,7 @@ impl FromWorld for MeshPipeline { texture_view, texture_format: image.texture_descriptor.format, sampler, - size: Vec2::new( - image.texture_descriptor.size.width as f32, - image.texture_descriptor.size.height as f32, - ), + size: image.size_f32(), mip_level_count: image.texture_descriptor.mip_level_count, } }; diff --git a/crates/bevy_render/src/camera/camera.rs b/crates/bevy_render/src/camera/camera.rs index aafe2b8a80c86..e03dc7a78985d 100644 --- a/crates/bevy_render/src/camera/camera.rs +++ b/crates/bevy_render/src/camera/camera.rs @@ -27,7 +27,7 @@ use bevy_window::{ NormalizedWindowRef, PrimaryWindow, Window, WindowCreated, WindowRef, WindowResized, }; use std::{borrow::Cow, ops::Range}; -use wgpu::{BlendState, Extent3d, LoadOp, TextureFormat}; +use wgpu::{BlendState, LoadOp, TextureFormat}; /// Render viewport configuration for the [`Camera`] component. /// @@ -509,9 +509,8 @@ impl NormalizedRenderTarget { }), NormalizedRenderTarget::Image(image_handle) => { let image = images.get(image_handle)?; - let Extent3d { width, height, .. } = image.texture_descriptor.size; Some(RenderTargetInfo { - physical_size: UVec2::new(width, height), + physical_size: image.size(), scale_factor: 1.0, }) } diff --git a/crates/bevy_render/src/texture/fallback_image.rs b/crates/bevy_render/src/texture/fallback_image.rs index 4075963a614a2..923a0247e6cf7 100644 --- a/crates/bevy_render/src/texture/fallback_image.rs +++ b/crates/bevy_render/src/texture/fallback_image.rs @@ -4,7 +4,6 @@ use bevy_ecs::{ prelude::{FromWorld, Res, ResMut}, system::{Resource, SystemParam}, }; -use bevy_math::Vec2; use bevy_utils::HashMap; use wgpu::{Extent3d, TextureFormat}; @@ -103,17 +102,14 @@ fn fallback_image_new( }); let sampler = match image.sampler_descriptor { ImageSampler::Default => (**default_sampler).clone(), - ImageSampler::Descriptor(descriptor) => render_device.create_sampler(&descriptor), + ImageSampler::Descriptor(ref descriptor) => render_device.create_sampler(descriptor), }; GpuImage { texture, texture_view, texture_format: image.texture_descriptor.format, sampler, - size: Vec2::new( - image.texture_descriptor.size.width as f32, - image.texture_descriptor.size.height as f32, - ), + size: image.size_f32(), mip_level_count: image.texture_descriptor.mip_level_count, } } diff --git a/crates/bevy_render/src/texture/image.rs b/crates/bevy_render/src/texture/image.rs index aac9759278e1b..b90ec82ce431a 100644 --- a/crates/bevy_render/src/texture/image.rs +++ b/crates/bevy_render/src/texture/image.rs @@ -254,27 +254,32 @@ impl Image { value } - /// Returns the aspect ratio (height/width) of a 2D image. - pub fn aspect_ratio(&self) -> f32 { - self.height() as f32 / self.width() as f32 - } - /// Returns the width of a 2D image. + #[inline] pub fn width(&self) -> u32 { self.texture_descriptor.size.width } /// Returns the height of a 2D image. + #[inline] pub fn height(&self) -> u32 { self.texture_descriptor.size.height } + /// Returns the aspect ratio (height/width) of a 2D image. 
+ #[inline] + pub fn aspect_ratio(&self) -> f32 { + self.height() as f32 / self.width() as f32 + } + /// Returns the size of a 2D image as f32. + #[inline] pub fn size_f32(&self) -> Vec2 { Vec2::new(self.width() as f32, self.height() as f32) } /// Returns the size of a 2D image. + #[inline] pub fn size(&self) -> UVec2 { UVec2::new(self.width(), self.height()) } @@ -316,11 +321,11 @@ impl Image { // Must be a stacked image, and the height must be divisible by layers. assert!(self.texture_descriptor.dimension == TextureDimension::D2); assert!(self.texture_descriptor.size.depth_or_array_layers == 1); - assert_eq!(self.texture_descriptor.size.height % layers, 0); + assert_eq!(self.height() % layers, 0); self.reinterpret_size(Extent3d { - width: self.texture_descriptor.size.width, - height: self.texture_descriptor.size.height / layers, + width: self.width(), + height: self.height() / layers, depth_or_array_layers: layers, }); } diff --git a/crates/bevy_render/src/texture/image_texture_conversion.rs b/crates/bevy_render/src/texture/image_texture_conversion.rs index 6fce5e5ecf1a4..298c39219c0cc 100644 --- a/crates/bevy_render/src/texture/image_texture_conversion.rs +++ b/crates/bevy_render/src/texture/image_texture_conversion.rs @@ -165,38 +165,28 @@ impl Image { /// To convert [`Image`] to a different format see: [`Image::convert`]. pub fn try_into_dynamic(self) -> Result { match self.texture_descriptor.format { - TextureFormat::R8Unorm => ImageBuffer::from_raw( - self.texture_descriptor.size.width, - self.texture_descriptor.size.height, - self.data, - ) - .map(DynamicImage::ImageLuma8), - TextureFormat::Rg8Unorm => ImageBuffer::from_raw( - self.texture_descriptor.size.width, - self.texture_descriptor.size.height, - self.data, - ) - .map(DynamicImage::ImageLumaA8), - TextureFormat::Rgba8UnormSrgb => ImageBuffer::from_raw( - self.texture_descriptor.size.width, - self.texture_descriptor.size.height, - self.data, - ) - .map(DynamicImage::ImageRgba8), + TextureFormat::R8Unorm => ImageBuffer::from_raw(self.width(), self.height(), self.data) + .map(DynamicImage::ImageLuma8), + TextureFormat::Rg8Unorm => { + ImageBuffer::from_raw(self.width(), self.height(), self.data) + .map(DynamicImage::ImageLumaA8) + } + TextureFormat::Rgba8UnormSrgb => { + ImageBuffer::from_raw(self.width(), self.height(), self.data) + .map(DynamicImage::ImageRgba8) + } // This format is commonly used as the format for the swapchain texture // This conversion is added here to support screenshots - TextureFormat::Bgra8UnormSrgb | TextureFormat::Bgra8Unorm => ImageBuffer::from_raw( - self.texture_descriptor.size.width, - self.texture_descriptor.size.height, - { + TextureFormat::Bgra8UnormSrgb | TextureFormat::Bgra8Unorm => { + ImageBuffer::from_raw(self.width(), self.height(), { let mut data = self.data; for bgra in data.chunks_exact_mut(4) { bgra.swap(0, 2); } data - }, - ) - .map(DynamicImage::ImageRgba8), + }) + .map(DynamicImage::ImageRgba8) + } // Throw and error if conversion isn't supported texture_format => return Err(IntoDynamicImageError::UnsupportedFormat(texture_format)), } diff --git a/crates/bevy_sprite/src/dynamic_texture_atlas_builder.rs b/crates/bevy_sprite/src/dynamic_texture_atlas_builder.rs index 18dd13a0ed8ff..fcf19bba537ae 100644 --- a/crates/bevy_sprite/src/dynamic_texture_atlas_builder.rs +++ b/crates/bevy_sprite/src/dynamic_texture_atlas_builder.rs @@ -36,8 +36,8 @@ impl DynamicTextureAtlasBuilder { texture: &Image, ) -> Option { let allocation = self.atlas_allocator.allocate(size2( - 
texture.texture_descriptor.size.width as i32 + self.padding, - texture.texture_descriptor.size.height as i32 + self.padding, + texture.width() as i32 + self.padding, + texture.height() as i32 + self.padding, )); if let Some(allocation) = allocation { let atlas_texture = textures.get_mut(&texture_atlas.texture).unwrap(); @@ -59,7 +59,7 @@ impl DynamicTextureAtlasBuilder { let mut rect = allocation.rectangle; rect.max.x -= self.padding; rect.max.y -= self.padding; - let atlas_width = atlas_texture.texture_descriptor.size.width as usize; + let atlas_width = atlas_texture.width() as usize; let rect_width = rect.width() as usize; let format_size = atlas_texture.texture_descriptor.format.pixel_size(); diff --git a/crates/bevy_sprite/src/mesh2d/mesh.rs b/crates/bevy_sprite/src/mesh2d/mesh.rs index df5e4f0594011..7aa212177dac3 100644 --- a/crates/bevy_sprite/src/mesh2d/mesh.rs +++ b/crates/bevy_sprite/src/mesh2d/mesh.rs @@ -8,7 +8,7 @@ use bevy_ecs::{ query::{QueryItem, ROQueryItem}, system::{lifetimeless::*, SystemParamItem, SystemState}, }; -use bevy_math::{Affine3, Vec2, Vec4}; +use bevy_math::{Affine3, Vec4}; use bevy_reflect::Reflect; use bevy_render::{ batching::{ @@ -297,7 +297,9 @@ impl FromWorld for Mesh2dPipeline { let texture = render_device.create_texture(&image.texture_descriptor); let sampler = match image.sampler_descriptor { ImageSampler::Default => (**default_sampler).clone(), - ImageSampler::Descriptor(descriptor) => render_device.create_sampler(&descriptor), + ImageSampler::Descriptor(ref descriptor) => { + render_device.create_sampler(descriptor) + } }; let format_size = image.texture_descriptor.format.pixel_size(); @@ -311,7 +313,7 @@ impl FromWorld for Mesh2dPipeline { &image.data, ImageDataLayout { offset: 0, - bytes_per_row: Some(image.texture_descriptor.size.width * format_size as u32), + bytes_per_row: Some(image.width() * format_size as u32), rows_per_image: None, }, image.texture_descriptor.size, @@ -323,10 +325,7 @@ impl FromWorld for Mesh2dPipeline { texture_view, texture_format: image.texture_descriptor.format, sampler, - size: Vec2::new( - image.texture_descriptor.size.width as f32, - image.texture_descriptor.size.height as f32, - ), + size: image.size_f32(), mip_level_count: image.texture_descriptor.mip_level_count, } }; diff --git a/crates/bevy_sprite/src/render/mod.rs b/crates/bevy_sprite/src/render/mod.rs index bebdbad231393..7aa5ae0c147a6 100644 --- a/crates/bevy_sprite/src/render/mod.rs +++ b/crates/bevy_sprite/src/render/mod.rs @@ -93,7 +93,9 @@ impl FromWorld for SpritePipeline { let texture = render_device.create_texture(&image.texture_descriptor); let sampler = match image.sampler_descriptor { ImageSampler::Default => (**default_sampler).clone(), - ImageSampler::Descriptor(descriptor) => render_device.create_sampler(&descriptor), + ImageSampler::Descriptor(ref descriptor) => { + render_device.create_sampler(descriptor) + } }; let format_size = image.texture_descriptor.format.pixel_size(); @@ -107,7 +109,7 @@ impl FromWorld for SpritePipeline { &image.data, ImageDataLayout { offset: 0, - bytes_per_row: Some(image.texture_descriptor.size.width * format_size as u32), + bytes_per_row: Some(image.width() * format_size as u32), rows_per_image: None, }, image.texture_descriptor.size, @@ -118,10 +120,7 @@ impl FromWorld for SpritePipeline { texture_view, texture_format: image.texture_descriptor.format, sampler, - size: Vec2::new( - image.texture_descriptor.size.width as f32, - image.texture_descriptor.size.height as f32, - ), + size: image.size_f32(), 
mip_level_count: image.texture_descriptor.mip_level_count, } }; diff --git a/crates/bevy_sprite/src/texture_atlas_builder.rs b/crates/bevy_sprite/src/texture_atlas_builder.rs index a4d55016afcc2..50ae821510b2b 100644 --- a/crates/bevy_sprite/src/texture_atlas_builder.rs +++ b/crates/bevy_sprite/src/texture_atlas_builder.rs @@ -105,7 +105,7 @@ impl TextureAtlasBuilder { let rect_height = (packed_location.height() - padding.y) as usize; let rect_x = packed_location.x() as usize; let rect_y = packed_location.y() as usize; - let atlas_width = atlas_texture.texture_descriptor.size.width as usize; + let atlas_width = atlas_texture.width() as usize; let format_size = atlas_texture.texture_descriptor.format.pixel_size(); for (texture_y, bound_y) in (rect_y..rect_y + rect_height).enumerate() { @@ -247,10 +247,7 @@ impl TextureAtlasBuilder { self.copy_converted_texture(&mut atlas_texture, texture, packed_location); } Ok(TextureAtlas { - size: Vec2::new( - atlas_texture.texture_descriptor.size.width as f32, - atlas_texture.texture_descriptor.size.height as f32, - ), + size: atlas_texture.size_f32(), texture: textures.add(atlas_texture), textures: texture_rects, texture_handles: Some(texture_ids), diff --git a/crates/bevy_text/src/font_atlas_set.rs b/crates/bevy_text/src/font_atlas_set.rs index 730d978c1477c..451db26bd50a1 100644 --- a/crates/bevy_text/src/font_atlas_set.rs +++ b/crates/bevy_text/src/font_atlas_set.rs @@ -107,7 +107,7 @@ impl FontAtlasSet { .texture_descriptor .size .height - .max(glyph_texture.texture_descriptor.size.width); + .max(glyph_texture.width()); // Pick the higher of 512 or the smallest power of 2 greater than glyph_max_size let containing = (1u32 << (32 - glyph_max_size.leading_zeros())).max(512) as f32; font_atlases.push(FontAtlas::new( diff --git a/crates/bevy_ui/src/widget/image.rs b/crates/bevy_ui/src/widget/image.rs index f7a54c0da6f42..c676209605b1d 100644 --- a/crates/bevy_ui/src/widget/image.rs +++ b/crates/bevy_ui/src/widget/image.rs @@ -92,10 +92,7 @@ pub fn update_image_content_size_system( for (mut content_size, image, mut image_size) in &mut query { if let Some(texture) = textures.get(&image.texture) { - let size = Vec2::new( - texture.texture_descriptor.size.width as f32, - texture.texture_descriptor.size.height as f32, - ); + let size = texture.size_f32(); // Update only if size or scale factor has changed to avoid needless layout calculations if size != image_size.size || combined_scale_factor != *previous_combined_scale_factor diff --git a/examples/3d/skybox.rs b/examples/3d/skybox.rs index cad96aeb6c711..b275a5ee30386 100644 --- a/examples/3d/skybox.rs +++ b/examples/3d/skybox.rs @@ -146,9 +146,7 @@ fn asset_loaded( // NOTE: PNGs do not have any metadata that could indicate they contain a cubemap texture, // so they appear as one texture. The following code reconfigures the texture as necessary. 
if image.texture_descriptor.array_layer_count() == 1 { - image.reinterpret_stacked_2d_as_array( - image.texture_descriptor.size.height / image.texture_descriptor.size.width, - ); + image.reinterpret_stacked_2d_as_array(image.height() / image.width()); image.texture_view_descriptor = Some(TextureViewDescriptor { dimension: Some(TextureViewDimension::Cube), ..default() From fb5588413f9438592ebf81cc2820408ffcdcb759 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 24 Oct 2023 01:49:57 +0200 Subject: [PATCH 63/63] Update async-io requirement from 1.13.0 to 2.0.0 (#10238) Updates the requirements on [async-io](https://github.com/smol-rs/async-io) to permit the latest version.