diff --git a/.gitignore b/.gitignore index 9a066723a72f4..9cbf04fa5e8e3 100644 --- a/.gitignore +++ b/.gitignore @@ -39,3 +39,6 @@ assets/scenes/load_scene_example-new.scn.ron # Generated by "examples/large_scenes" compressed_texture_cache + +# Generated by "examples/dev_tools/schedule_data.rs" +**/app_data.ron diff --git a/Cargo.toml b/Cargo.toml index ff7278148092c..f9c38b6ef1e8a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -643,6 +643,9 @@ bevy_debug_stepping = [ "bevy_internal/debug", ] +# Enable collecting schedule data from the app. +schedule_data = ["bevy_internal/schedule_data"] + # Enables the meshlet renderer for dense high-poly scenes (experimental) meshlet = ["bevy_internal/meshlet"] @@ -4609,6 +4612,18 @@ description = "Demonstrates FPS overlay" category = "Dev tools" wasm = true +[[example]] +name = "schedule_data" +path = "examples/dev_tools/schedule_data.rs" +doc-scrape-examples = true +required-features = ["debug", "schedule_data"] + +[package.metadata.example.schedule_data] +name = "Extract Schedule Data" +description = "Extracts the schedule data from a default app and writes it to a file" +category = "Dev tools" +wasm = false + [[example]] name = "infinite_grid" path = "examples/dev_tools/infinite_grid.rs" diff --git a/crates/bevy_dev_tools/Cargo.toml b/crates/bevy_dev_tools/Cargo.toml index 0a1b8d89d1a1b..60ec8a37631d0 100644 --- a/crates/bevy_dev_tools/Cargo.toml +++ b/crates/bevy_dev_tools/Cargo.toml @@ -9,10 +9,17 @@ license = "MIT OR Apache-2.0" keywords = ["bevy"] [features] -bevy_ci_testing = ["serde", "ron"] +bevy_ci_testing = ["dep:serde", "dep:ron"] screenrecording = ["dep:x264"] webgl = ["bevy_render/webgl"] webgpu = ["bevy_render/webgpu"] +schedule_data = [ + "dep:serde", + "dep:ron", + "dep:bevy_platform", + "dep:bevy_utils", + "dep:thiserror", +] [dependencies] # bevy @@ -37,14 +44,23 @@ bevy_transform = { path = "../bevy_transform", version = "0.19.0-dev" } bevy_shader = { path = "../bevy_shader", version = "0.19.0-dev" } bevy_ui = 
{ path = "../bevy_ui", version = "0.19.0-dev" } bevy_ui_render = { path = "../bevy_ui_render", version = "0.19.0-dev" } +bevy_utils = { path = "../bevy_utils", version = "0.19.0-dev", optional = true } bevy_window = { path = "../bevy_window", version = "0.19.0-dev" } bevy_state = { path = "../bevy_state", version = "0.19.0-dev" } +bevy_platform = { path = "../bevy_platform", version = "0.19.0-dev", optional = true } # other +thiserror = { version = "2.0", default-features = false, optional = true } serde = { version = "1.0", features = ["derive"], optional = true } ron = { version = "0.12", optional = true } tracing = { version = "0.1", default-features = false, features = ["std"] } +[dev-dependencies] +# Allow tests to depend on the names of systems. +bevy_ecs = { path = "../bevy_ecs", version = "0.19.0-dev", features = [ + "debug", +] } + [target.'cfg(not(target_os = "windows"))'.dependencies] x264 = { version = "0.5.0", optional = true } diff --git a/crates/bevy_dev_tools/src/lib.rs b/crates/bevy_dev_tools/src/lib.rs index 6c2b59d2daa9c..7207d6399af7e 100644 --- a/crates/bevy_dev_tools/src/lib.rs +++ b/crates/bevy_dev_tools/src/lib.rs @@ -20,6 +20,9 @@ pub mod frame_time_graph; pub mod picking_debug; +#[cfg(feature = "schedule_data")] +pub mod schedule_data; + pub mod states; pub use easy_screenshot::*; diff --git a/crates/bevy_dev_tools/src/schedule_data/mod.rs b/crates/bevy_dev_tools/src/schedule_data/mod.rs new file mode 100644 index 0000000000000..bcaccb11f6c7c --- /dev/null +++ b/crates/bevy_dev_tools/src/schedule_data/mod.rs @@ -0,0 +1,5 @@ +//! Tools for extracting schedule data from an app, and interpreting that data for use with +//! visualization tools (for example). 
+ +pub mod plugin; +pub mod serde; diff --git a/crates/bevy_dev_tools/src/schedule_data/plugin.rs b/crates/bevy_dev_tools/src/schedule_data/plugin.rs new file mode 100644 index 0000000000000..9ee92d2a804c7 --- /dev/null +++ b/crates/bevy_dev_tools/src/schedule_data/plugin.rs @@ -0,0 +1,171 @@ +//! Convenience plugin for automatically performing serialization of schedules on boot. +use std::{fs::File, io::Write, path::PathBuf}; + +use bevy_app::{App, Main, Plugin}; +use bevy_ecs::{ + error::{BevyError, ResultSeverityExt, Severity}, + intern::Interned, + resource::Resource, + schedule::{ + common_conditions::run_once, IntoScheduleConfigs, ScheduleLabel, Schedules, SystemSet, + }, + world::World, +}; +use bevy_platform::collections::HashMap; +use ron::ser::PrettyConfig; + +use crate::schedule_data::serde::AppData; + +/// A plugin to automatically collect and write all schedule data on boot to a file that can later +/// be parsed. +/// +/// By default, the schedule data is written to `/app_data.ron`. This can +/// be configured to a different path using [`SerializeSchedulesFilePath`]. +pub struct SerializeSchedulesPlugin { + /// The schedule into which the systems for collecting/writing the schedule data are added. + /// + /// This schedule **will not** have its schedule data collected, as well as any "parent" + /// schedules. In order to run a schedule, Bevy removes it from the world, meaning if this + /// system is added to schedule [`Update`](bevy_app::Update), that schedule and also [`Main`] + /// will not be included in the [`AppData`]. The default is the [`Main`] schedule since usually + /// there is only one system ([`Main::run_main`]), so there's very little data to collect. + /// + /// Avoid changing this field. This is intended for power-users who might not use the [`Main`] + /// schedule at all. It may also be worth considering just calling [`AppData::from_schedules`] + /// manually to ensure a particular schedule is present. 
+ /// + /// Usually, this will be set using [`Self::in_schedule`]. + pub schedule: Interned<dyn ScheduleLabel>, +} + +impl Default for SerializeSchedulesPlugin { + fn default() -> Self { + Self { + schedule: Main.intern(), + } + } +} + +impl SerializeSchedulesPlugin { + /// Creates an instance of [`Self`] that inserts into the specified schedule. + pub fn in_schedule(label: impl ScheduleLabel) -> Self { + Self { + schedule: label.intern(), + } + } +} + +impl Plugin for SerializeSchedulesPlugin { + fn build(&self, app: &mut App) { + app.init_resource::<SerializeSchedulesFilePath>() + .add_systems( + self.schedule, + collect_system_data + .run_if(run_once) + .in_set(SerializeSchedulesSystems) + // While we may not be in the `Main` schedule at all, the default is that, so we + // should make this work properly in the default case. + .before(Main::run_main), + ); + } +} + +/// A system set for allowing users to configure scheduling properties of systems in +/// [`SerializeSchedulesPlugin`]. +#[derive(SystemSet, Hash, PartialEq, Eq, Debug, Clone)] +pub struct SerializeSchedulesSystems; + +/// The file path where schedules will be written to after collected by +/// [`SerializeSchedulesPlugin`]. +#[derive(Resource)] +pub struct SerializeSchedulesFilePath(pub PathBuf); + +impl Default for SerializeSchedulesFilePath { + fn default() -> Self { + Self("app_data.ron".into()) + } +} + +/// The inner part of [`collect_system_data`] that returns the [`AppData`] so we can write tests +/// without needing to write to disk. +fn collect_system_data_inner(world: &mut World) -> Result<AppData, BevyError> { + let schedules = world.resource::<Schedules>(); + let labels = schedules + .iter() + .map(|schedule| schedule.1.label()) + .collect::<Vec<_>>(); + let mut label_to_build_metadata = HashMap::new(); + + for label in labels { + let mut schedules = world.resource_mut::<Schedules>(); + let mut schedule = schedules.remove(label).unwrap(); + let Some(build_metadata) = schedule.initialize(world)? 
else { + return Err( + "The schedule has already been built, so we can't collect its system data".into(), + ); + }; + + label_to_build_metadata.insert(label, build_metadata); + + let mut schedules = world.resource_mut::<Schedules>(); + schedules.insert(schedule); + } + + let schedules = world.resource::<Schedules>(); + Ok(AppData::from_schedules( + schedules, + world.components(), + &label_to_build_metadata, + )?) +} + +/// A system that collects all the schedule data and writes it to [`SerializeSchedulesFilePath`]. +fn collect_system_data(world: &mut World) -> Result<(), BevyError> { + let app_data = collect_system_data_inner(world).with_severity(Severity::Warning)?; + let file_path = world + .get_resource::<SerializeSchedulesFilePath>() + .ok_or("Missing SerializeSchedulesFilePath resource") + .with_severity(Severity::Warning)?; + let mut file = File::create(&file_path.0).with_severity(Severity::Warning)?; + // Use \n unconditionally so that Windows formatting is predictable. + let serialized = ron::ser::to_string_pretty(&app_data, PrettyConfig::default().new_line("\n"))?; + file.write_all(serialized.as_bytes()) + .with_severity(Severity::Warning)?; + Ok(()) +} + +#[cfg(test)] +mod tests { + use bevy_app::{App, PostUpdate, Update}; + + use crate::schedule_data::{ + plugin::collect_system_data_inner, + serde::tests::{remove_module_paths, simple_system, sort_app_data}, + }; + + #[test] + fn collects_all_schedules() { + // Start with an empty app so only our stuff gets added. + let mut app = App::empty(); + + fn a() {} + fn b() {} + fn c() {} + app.add_systems(Update, (a, b)); + app.add_systems(PostUpdate, c); + + // Normally users would use the plugin, but to avoid writing to disk in a test, we just call + // the inner part of the system directly. 
+ let mut app_data = collect_system_data_inner(app.world_mut()).unwrap(); + remove_module_paths(&mut app_data); + sort_app_data(&mut app_data); + + assert_eq!(app_data.schedules.len(), 2); + let post_update = &app_data.schedules[0]; + assert_eq!(post_update.name, "PostUpdate"); + assert_eq!(post_update.systems, [simple_system("c")]); + let update = &app_data.schedules[1]; + assert_eq!(update.name, "Update"); + assert_eq!(update.systems, [simple_system("a"), simple_system("b")]); + } +} diff --git a/crates/bevy_dev_tools/src/schedule_data/serde.rs b/crates/bevy_dev_tools/src/schedule_data/serde.rs new file mode 100644 index 0000000000000..9d04485321b8d --- /dev/null +++ b/crates/bevy_dev_tools/src/schedule_data/serde.rs @@ -0,0 +1,887 @@ +//! Utilities for serializing schedule data for an [`App`](bevy_app::App). +//! +//! These are mostly around providing types implementing [`Serialize`]/[`Deserialize`] that +//! represent schedule data. In addition, there are tools for extracting this data from the +//! [`World`](bevy_ecs::world::World). + +use bevy_ecs::{ + component::{ComponentId, Components}, + schedule::{ + ApplyDeferred, ConditionWithAccess, InternedScheduleLabel, NodeId, Schedule, + ScheduleBuildMetadata, Schedules, + }, + system::SystemStateFlags, +}; +use bevy_platform::collections::{hash_map::Entry, HashMap}; +use serde::{Deserialize, Serialize}; +use thiserror::Error; + +/// The data for the entire app's schedule. +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct AppData { + /// A list of all schedules in the app. + pub schedules: Vec, +} + +impl AppData { + /// Creates the data from the underlying [`Schedules`]. + /// + /// Note: we assume all schedules in `schedules` have been initialized through + /// [`Schedule::initialize`]. 
+ pub fn from_schedules( + schedules: &Schedules, + world_components: &Components, + label_to_build_metadata: &HashMap, + ) -> Result { + Ok(Self { + schedules: schedules + .iter() + .map(|(_, schedule)| { + ScheduleData::from_schedule( + schedule, + world_components, + label_to_build_metadata.get(&schedule.label()), + ) + }) + .collect::>()?, + }) + } +} + +/// Data about a particular schedule. +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct ScheduleData { + /// The name of the schedule. + pub name: String, + /// The systems in this schedule. + pub systems: Vec, + /// The system sets in this schedule. + pub system_sets: Vec, + /// A list of relationships indicating that a system/system set is contained in a system set. + /// + /// The order is (parent, child). + pub hierarchy: Vec<(SystemSetIndex, ScheduleIndex)>, + /// A list of ordering constraints, ensuring that one system/system set runs before another. + /// + /// The order is (first, second). + pub dependency: Vec<(ScheduleIndex, ScheduleIndex)>, + /// The components that these systems access. + pub components: Vec, + /// A list of conflicts between systems. + pub conflicts: Vec, +} + +/// Data about a component type. +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] +pub struct ComponentData { + /// The name of the component. + pub name: String, +} + +/// Data about a particular system. +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] +pub struct SystemData { + /// The name of the system. + pub name: String, + /// Whether this system is a sync point (aka [`ApplyDeferred`]). + pub apply_deferred: bool, + /// Whether this system is exclusive. + pub exclusive: bool, + /// Whether this system has deferred buffers to apply. + pub deferred: bool, + // TODO: Store the conditions specific to this system. +} + +/// Data about a particular system set. +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] +pub struct SystemSetData { + /// The name of the system set. 
+ pub name: String, + /// The conditions applied to this system. + pub conditions: Vec, +} + +/// Data about a run condition for a system. +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] +pub struct ConditionData { + /// The name of the condition. + pub name: String, +} + +/// An index of an element in a schedule. +#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)] +pub enum ScheduleIndex { + /// The index of a system. + System(u32), + /// The index of a system set. + SystemSet(u32), +} + +/// Data about an access conflict between two systems. +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] +pub struct SystemConflict { + /// The first system index. + pub system_1: u32, + /// The second system index. + pub system_2: u32, + /// The kind of conflict between these systems. + pub conflicting_access: AccessConflict, +} + +/// Data for describing the kind of access conflict. +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] +pub enum AccessConflict { + /// There is a conflict on the **whole world**, since one of the systems requires world access + /// and the other needs mutable access to (some of) the world. + World, + /// There is incompatible accesses to the listed components. + Components(Vec), +} + +/// A newtype for the index of a system set. +/// +/// This is the same kind of index as [`ScheduleIndex::SystemSet`], but for cases where we know we +/// can't have a system. +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone, Copy, PartialOrd, Ord)] +pub struct SystemSetIndex(pub u32); + +impl ScheduleData { + /// Creates the data from the underlying [`Schedule`]. + /// + /// Note: we assume `schedule` has already been initialized. 
+ pub fn from_schedule( + schedule: &Schedule, + world_components: &Components, + build_metadata: Option<&ScheduleBuildMetadata>, + ) -> Result { + let graph = schedule.graph(); + + let mut system_key_to_index = HashMap::new(); + let mut system_set_key_to_index = HashMap::new(); + + fn extract_condition_data(conditions: &[ConditionWithAccess]) -> Vec { + conditions + .iter() + .map(|condition| ConditionData { + name: format!("{}", condition.condition.name()), + }) + .collect() + } + + let systems = schedule + .systems() + .map_err(|_| { + ExtractAppDataError::ScheduleNotInitialized(format!("{:?}", schedule.label())) + })? + .enumerate() + .map(|(index, (key, system))| { + system_key_to_index.insert(key, index); + + let flags = system.flags(); + + SystemData { + name: format!("{}", system.name()), + apply_deferred: system.system_type() + == core::any::TypeId::of::(), + exclusive: flags.contains(SystemStateFlags::EXCLUSIVE), + deferred: flags.contains(SystemStateFlags::DEFERRED), + } + }) + .collect(); + + let system_sets = graph + .system_sets + .iter() + .enumerate() + .map(|(index, (key, system_set, conditions))| { + system_set_key_to_index.insert(key, index); + + SystemSetData { + name: format!("{:?}", system_set), + conditions: extract_condition_data(conditions), + } + }) + .collect(); + + let node_id_to_schedule_index = |node_id: NodeId| match node_id { + NodeId::System(key) => ScheduleIndex::System( + *system_key_to_index + .get(&key) + .expect("the system this key refers to should have already been seen") + as _, + ), + NodeId::Set(key) => ScheduleIndex::SystemSet( + *system_set_key_to_index + .get(&key) + .expect("the system set this key refers to should have already been seen") + as _, + ), + }; + + let hierarchy = graph + .hierarchy() + .graph() + .all_edges() + .map(|(parent, child)| { + let parent = system_set_key_to_index + .get( + &parent + .as_set() + .expect("the parent of a system/set is always a set"), + ) + .expect("the system set this key refers 
to should have already been seen"); + let child = node_id_to_schedule_index(child); + + (SystemSetIndex(*parent as _), child) + }) + .collect(); + + let mut dependency = graph + .dependency() + .graph() + .all_edges() + .map(|(a, b)| (node_id_to_schedule_index(a), node_id_to_schedule_index(b))) + .collect::>(); + + if let Some(build_metadata) = build_metadata { + // Add in all the edges that were created by build passes. + dependency.extend( + build_metadata + .edges_added_by_build_passes + .iter() + .map(|(a, b)| { + ( + node_id_to_schedule_index(NodeId::System(*a)), + node_id_to_schedule_index(NodeId::System(*b)), + ) + }), + ); + } + + let mut component_id_to_index = HashMap::::new(); + let mut components = vec![]; + + let conflicts = graph + .conflicting_systems() + .iter() + .map(|(system_1, system_2, conflicts)| { + let system_1 = system_key_to_index + .get(system_1) + .expect("the system this key refers to should have already been seen"); + let system_2 = system_key_to_index + .get(system_2) + .expect("the system this key refers to should have already been seen"); + + SystemConflict { + system_1: *system_1 as _, + system_2: *system_2 as _, + conflicting_access: if conflicts.is_empty() { + // The systems conflict on the world if there's no particular component IDs. 
+ AccessConflict::World + } else { + AccessConflict::Components( + conflicts + .iter() + .map(|id| match component_id_to_index.entry(*id) { + Entry::Occupied(entry) => *entry.get() as _, + Entry::Vacant(entry) => { + let component = world_components.get_info(*id).expect( + "the component has already been registered by the system", + ); + + components.push(ComponentData { + name: format!("{}", component.name()), + }); + *entry.insert(components.len() - 1) as _ + } + }) + .collect(), + ) + }, + } + }) + .collect(); + + Ok(Self { + name: format!("{:?}", schedule.label()), + components, + systems, + system_sets, + hierarchy, + dependency, + conflicts, + }) + } +} + +/// An error occurring while attempting to extract schedule data from an app. +#[derive(Error, Debug)] +pub enum ExtractAppDataError { + /// A schedule has not been initialized through [`Schedule::initialize`]. + #[error("executable schedule has not been created for label \"{0}\"")] + ScheduleNotInitialized(String), +} + +#[cfg(test)] +/// Tests for extracted schedule data. +/// +/// This is public to allow other test modules in this crate to use its utilities. +pub mod tests { + use bevy_app::{App, Update}; + use bevy_ecs::{ + component::Component, + query::{With, Without}, + schedule::{IntoScheduleConfigs, Schedules, SystemSet}, + system::{Commands, Query}, + }; + use bevy_platform::collections::HashMap; + + use crate::schedule_data::serde::{ + AccessConflict, AppData, ComponentData, ExtractAppDataError, ScheduleData, ScheduleIndex, + SystemConflict, SystemData, SystemSetData, SystemSetIndex, + }; + + fn app_data_from_app(app: &mut App) -> Result { + let schedules = app.world_mut().resource::(); + // TODO: This is a pain. It would be nice to be able to just hokey-pokey the whole + // `Schedules` resource, but initializing a schedule writes to `Schedules`. Also we need to + // use interned labels since `Box` doesn't impl `ScheduleLabel`! 
+ let interned_labels = schedules + .iter() + .map(|(_, schedule)| schedule.label()) + .collect::>(); + + let mut label_to_build_metadata = HashMap::new(); + + for label in interned_labels { + let mut schedule = app + .world_mut() + .resource_mut::() + .remove(label) + .expect("we just copied the label from this schedule"); + + let build_metadata = schedule.initialize(app.world_mut()).unwrap().unwrap(); + label_to_build_metadata.insert(label, build_metadata); + + app.world_mut().resource_mut::().insert(schedule); + } + + let mut app_data = AppData::from_schedules( + app.world().resource::(), + app.world().components(), + &label_to_build_metadata, + )?; + + remove_module_paths(&mut app_data); + sort_app_data(&mut app_data); + Ok(app_data) + } + + /// Removes the module paths from all items in the [`AppData`], so that moving tests around + /// doesn't change the output. + pub fn remove_module_paths(app_data: &mut AppData) { + for schedule in app_data.schedules.iter_mut() { + for system in schedule.systems.iter_mut() { + system.name = system.name.rsplit_once(":").unwrap().1.to_string(); + } + for set in schedule.system_sets.iter_mut() { + let name_modless = set + .name + .rsplit_once(":") + .map(|(_, suffix)| suffix) + .unwrap_or(set.name.as_str()) + .to_string(); + if set.name.starts_with("SystemTypeSet") { + // This is a set corresponding to a system. Make sure to keep the + // `SystemTypeSet` prefix. + set.name = format!("SystemTypeSet:{name_modless}"); + } else { + set.name = name_modless; + } + } + for component in schedule.components.iter_mut() { + component.name = component.name.rsplit_once(":").unwrap().1.to_string(); + } + } + } + + /// Sorts the [`AppData`] so we have a deterministic order when asserting. + // Note: we could do this when extracting unconditionally (even in prod), but there's not much + // point since schedule order is not guaranteed to be deterministic anyway. So relying on the + // same order seems weird. 
+ pub fn sort_app_data(app_data: &mut AppData) { + // Sort schedules by name. + app_data + .schedules + .sort_by_key(|schedule| schedule.name.clone()); + // Sort each schedule. + app_data.schedules.iter_mut().for_each(sort_schedule); + + /// Sorts a schedule so that systems, system sets, conditions, and components are in name + /// order, and other structures are in index order. + fn sort_schedule(schedule: &mut ScheduleData) { + /// Sorts the slice using `key_fn` and returns a mapping, which maps the original index + /// to the new index. + fn reorder_slice( + slice: &mut [T], + key_fn: impl Fn(&T) -> K, + ) -> HashMap { + let mut mapping = (0..slice.len()).collect::>(); + // We assume the two sorts produce the same thing which should be true since we are + // using a stable sort. + mapping.sort_by_key(|index| key_fn(&slice[*index])); + slice.sort_by_key(key_fn); + + mapping + .into_iter() + // Enumerating produces the new indices. + .enumerate() + // Flip the order of indices so that we go from old to new. 
+ .map(|(new, old)| (old, new)) + .collect() + } + + let system_old_index_to_new_index = + reorder_slice(&mut schedule.systems, |system| system.name.clone()); + let system_set_old_index_to_new_index = + reorder_slice(&mut schedule.system_sets, |set| set.name.clone()); + let component_old_index_to_new_index = + reorder_slice(&mut schedule.components, |component| component.name.clone()); + + let reindex_system = |index: &mut u32| { + *index = *system_old_index_to_new_index + .get(&(*index as usize)) + .unwrap() as u32; + }; + let reindex_system_set = |index: &mut u32| { + *index = *system_set_old_index_to_new_index + .get(&(*index as usize)) + .unwrap() as u32; + }; + let reindex_schedule_index = |index: &mut ScheduleIndex| match index { + ScheduleIndex::System(system) => reindex_system(system), + ScheduleIndex::SystemSet(set) => reindex_system_set(set), + }; + + let reindex_component = |index: &mut u32| { + *index = *component_old_index_to_new_index + .get(&(*index as usize)) + .unwrap() as u32; + }; + + // Sort the conditions in a system set. + for set in schedule.system_sets.iter_mut() { + set.conditions + .sort_by_key(|condition| condition.name.clone()); + } + + // Reindex the hierarchy, and sort it. + for (parent, child) in schedule.hierarchy.iter_mut() { + reindex_system_set(&mut parent.0); + reindex_schedule_index(child); + } + schedule.hierarchy.sort(); + + // Reindex the dependencies, and sort it. + for (parent, child) in schedule.dependency.iter_mut() { + reindex_schedule_index(parent); + reindex_schedule_index(child); + } + schedule.dependency.sort(); + + // Reindex the conflicts. + for conflict in schedule.conflicts.iter_mut() { + reindex_system(&mut conflict.system_1); + reindex_system(&mut conflict.system_2); + + // The order of the indices don't matter, so pick the ordering such that `system_1 < + // system_2`. 
+ if conflict.system_1 > conflict.system_2 { + core::mem::swap(&mut conflict.system_1, &mut conflict.system_2); + } + + match &mut conflict.conflicting_access { + AccessConflict::World => {} + AccessConflict::Components(components) => { + components.iter_mut().for_each(reindex_component); + components.sort(); + } + }; + } + schedule + .conflicts + .sort_by_key(|conflict| (conflict.system_1, conflict.system_2)); + } + } + + /// Convenience to create a [`SystemData`] for the common case of no flags set. + pub fn simple_system(name: &str) -> SystemData { + SystemData { + name: name.into(), + apply_deferred: false, + exclusive: false, + deferred: false, + } + } + + /// Convenience to create a [`SystemSetData`] for the common case of being empty. + pub fn simple_system_set(name: &str) -> SystemSetData { + SystemSetData { + name: name.into(), + conditions: vec![], + } + } + + /// Convenience to create a [`ComponentData`] to make test cases shorter. + pub fn simple_component(name: &str) -> ComponentData { + ComponentData { name: name.into() } + } + + /// Convenience to create a [`SystemConflict`] to make test cases shorter. + pub fn conflict( + system_1: u32, + system_2: u32, + conflicting_access: AccessConflict, + ) -> SystemConflict { + SystemConflict { + system_1, + system_2, + conflicting_access, + } + } + + /// A convenience system set that is generic allowing us to make many of these quickly. + #[derive(SystemSet, Hash, PartialEq, Eq, Clone)] + struct MySet; + + impl core::fmt::Debug for MySet { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!(f, "MySet<{NUM}>") + } + } + + /// A convenience component that is generic allowing us to make many of these quickly. 
+ #[derive(Component)] + struct MyComponent; + + #[test] + fn linear() { + let mut app = App::empty(); + + fn a() {} + fn b() {} + fn c() {} + + app.add_systems(Update, (a, b, c).chain()); + + let data = app_data_from_app(&mut app).unwrap(); + assert_eq!(data.schedules.len(), 1); + let schedule = &data.schedules[0]; + assert_eq!(schedule.name, "Update"); + assert_eq!( + schedule.systems, + [simple_system("a"), simple_system("b"), simple_system("c"),] + ); + // Each system is also a system set. + assert_eq!( + schedule.system_sets, + [ + simple_system_set("SystemTypeSet:a"), + simple_system_set("SystemTypeSet:b"), + simple_system_set("SystemTypeSet:c"), + ] + ); + // Every system is in its own system set. + assert_eq!( + schedule.hierarchy, + [ + (SystemSetIndex(0), ScheduleIndex::System(0)), + (SystemSetIndex(1), ScheduleIndex::System(1)), + (SystemSetIndex(2), ScheduleIndex::System(2)), + ] + ); + // There are 2 dependency edges to connect a-b and b-c. + assert_eq!( + schedule.dependency, + [ + (ScheduleIndex::System(0), ScheduleIndex::System(1)), + (ScheduleIndex::System(1), ScheduleIndex::System(2)), + ] + ); + assert_eq!(schedule.components.len(), 0); + assert_eq!(schedule.conflicts.len(), 0); + } + + #[test] + fn linear_with_system_sets() { + let mut app = App::empty(); + + app.configure_sets(Update, (MySet::<0>, MySet::<1>, MySet::<2>).chain()); + + let data = app_data_from_app(&mut app).unwrap(); + assert_eq!(data.schedules.len(), 1); + let schedule = &data.schedules[0]; + assert_eq!(schedule.name, "Update"); + assert_eq!(schedule.systems, []); + assert_eq!( + schedule.system_sets, + [ + simple_system_set("MySet<0>"), + simple_system_set("MySet<1>"), + simple_system_set("MySet<2>"), + ] + ); + assert_eq!(schedule.hierarchy, []); + // There are 2 dependency edges to connect 0-1 and 1-2. 
+ assert_eq!( + schedule.dependency, + [ + (ScheduleIndex::SystemSet(0), ScheduleIndex::SystemSet(1)), + (ScheduleIndex::SystemSet(1), ScheduleIndex::SystemSet(2)), + ] + ); + assert_eq!(schedule.components.len(), 0); + assert_eq!(schedule.conflicts.len(), 0); + } + + #[test] + fn stack_of_system_sets() { + let mut app = App::empty(); + + fn a() {} + + app.add_systems(Update, a.in_set(MySet::<0>)) + .configure_sets(Update, MySet::<0>.in_set(MySet::<1>)) + .configure_sets(Update, MySet::<1>.in_set(MySet::<2>)); + + let data = app_data_from_app(&mut app).unwrap(); + assert_eq!(data.schedules.len(), 1); + let schedule = &data.schedules[0]; + assert_eq!(schedule.name, "Update"); + assert_eq!(schedule.systems, [simple_system("a")]); + assert_eq!( + schedule.system_sets, + [ + simple_system_set("MySet<0>"), + simple_system_set("MySet<1>"), + simple_system_set("MySet<2>"), + simple_system_set("SystemTypeSet:a"), + ] + ); + assert_eq!( + schedule.hierarchy, + [ + (SystemSetIndex(0), ScheduleIndex::System(0)), + (SystemSetIndex(1), ScheduleIndex::SystemSet(0)), + (SystemSetIndex(2), ScheduleIndex::SystemSet(1)), + (SystemSetIndex(3), ScheduleIndex::System(0)), + ] + ); + assert_eq!(schedule.dependency, []); + assert_eq!(schedule.components.len(), 0); + assert_eq!(schedule.conflicts.len(), 0); + } + + #[test] + fn records_system_kind_flags() { + let mut app = App::empty(); + + fn a0(_commands: Commands) {} + fn a1(_commands: Commands) {} + fn b0() {} + fn b1() {} + + fn c0() {} + fn c1() {} + + app.add_systems(Update, (((a0, a1), (b0, b1)).chain(), (c0, c1).chain())); + + let data = app_data_from_app(&mut app).unwrap(); + assert_eq!(data.schedules.len(), 1); + let schedule = &data.schedules[0]; + assert_eq!(schedule.name, "Update"); + assert_eq!( + schedule.systems, + [ + SystemData { + name: "a0".into(), + apply_deferred: false, + exclusive: false, + deferred: true, + }, + SystemData { + name: "a1".into(), + apply_deferred: false, + exclusive: false, + deferred: true, + }, 
+ SystemData { + name: "apply_deferred".into(), + apply_deferred: true, + exclusive: true, + deferred: false, + }, + simple_system("b0"), + simple_system("b1"), + simple_system("c0"), + simple_system("c1"), + ] + ); + assert_eq!( + schedule.system_sets, + [ + simple_system_set("SystemTypeSet:a0"), + simple_system_set("SystemTypeSet:a1"), + simple_system_set("SystemTypeSet:b0"), + simple_system_set("SystemTypeSet:b1"), + simple_system_set("SystemTypeSet:c0"), + simple_system_set("SystemTypeSet:c1"), + ] + ); + assert_eq!( + schedule.hierarchy, + [ + (SystemSetIndex(0), ScheduleIndex::System(0)), + (SystemSetIndex(1), ScheduleIndex::System(1)), + (SystemSetIndex(2), ScheduleIndex::System(3)), + (SystemSetIndex(3), ScheduleIndex::System(4)), + (SystemSetIndex(4), ScheduleIndex::System(5)), + (SystemSetIndex(5), ScheduleIndex::System(6)), + ] + ); + assert_eq!( + schedule.dependency, + [ + // a->sync and a->b + (ScheduleIndex::System(0), ScheduleIndex::System(2)), + (ScheduleIndex::System(0), ScheduleIndex::System(3)), + (ScheduleIndex::System(0), ScheduleIndex::System(4)), + (ScheduleIndex::System(1), ScheduleIndex::System(2)), + (ScheduleIndex::System(1), ScheduleIndex::System(3)), + (ScheduleIndex::System(1), ScheduleIndex::System(4)), + // sync->b + (ScheduleIndex::System(2), ScheduleIndex::System(3)), + (ScheduleIndex::System(2), ScheduleIndex::System(4)), + // c0->c1 + (ScheduleIndex::System(5), ScheduleIndex::System(6)), + ] + ); + assert_eq!(schedule.components.len(), 0); + assert_eq!(schedule.conflicts.len(), 0); + } + + #[test] + fn records_conflicts() { + let mut app = App::empty(); + + // These two systems don't conflict. + fn a0(_: Query<&MyComponent<0>>) {} + fn a1(_: Query<&MyComponent<0>>) {} + + // These two systems conflict on one component. + fn b0(_: Query<&MyComponent<1>>) {} + fn b1(_: Query<&mut MyComponent<1>>) {} + + // These two systems conflict on two components. 
+ fn c0( + _: Query<( + &MyComponent<2>, + &mut MyComponent<3>, + &MyComponent<4>, + &MyComponent<5>, + )>, + ) { + } + fn c1( + _: Query<( + &mut MyComponent<2>, + &MyComponent<3>, + &MyComponent<4>, + &MyComponent<6>, + )>, + ) { + } + + // These two systems use With/Without to avoid a conflict. + fn d0(_: Query<&mut MyComponent<7>, With<MyComponent<8>>>) {} + fn d1(_: Query<&mut MyComponent<7>, Without<MyComponent<8>>>) {} + + // These two systems use an ordering to avoid a conflict. + fn e0(_: Query<&mut MyComponent<9>>) {} + fn e1(_: Query<&mut MyComponent<9>>) {} + + app.add_systems(Update, (a0, a1, b0, b1, c0, c1, d0, d1, (e0, e1).chain())); + + let data = app_data_from_app(&mut app).unwrap(); + assert_eq!(data.schedules.len(), 1); + let schedule = &data.schedules[0]; + assert_eq!(schedule.name, "Update"); + assert_eq!( + schedule.systems, + [ + simple_system("a0"), + simple_system("a1"), + simple_system("b0"), + simple_system("b1"), + simple_system("c0"), + simple_system("c1"), + simple_system("d0"), + simple_system("d1"), + simple_system("e0"), + simple_system("e1"), + ] + ); + assert_eq!( + schedule.system_sets, + [ + simple_system_set("SystemTypeSet:a0"), + simple_system_set("SystemTypeSet:a1"), + simple_system_set("SystemTypeSet:b0"), + simple_system_set("SystemTypeSet:b1"), + simple_system_set("SystemTypeSet:c0"), + simple_system_set("SystemTypeSet:c1"), + simple_system_set("SystemTypeSet:d0"), + simple_system_set("SystemTypeSet:d1"), + simple_system_set("SystemTypeSet:e0"), + simple_system_set("SystemTypeSet:e1"), + ] + ); + assert_eq!( + schedule.hierarchy, + [ + (SystemSetIndex(0), ScheduleIndex::System(0)), + (SystemSetIndex(1), ScheduleIndex::System(1)), + (SystemSetIndex(2), ScheduleIndex::System(2)), + (SystemSetIndex(3), ScheduleIndex::System(3)), + (SystemSetIndex(4), ScheduleIndex::System(4)), + (SystemSetIndex(5), ScheduleIndex::System(5)), + (SystemSetIndex(6), ScheduleIndex::System(6)), + (SystemSetIndex(7), ScheduleIndex::System(7)), + (SystemSetIndex(8),
ScheduleIndex::System(8)), + (SystemSetIndex(9), ScheduleIndex::System(9)), + ] + ); + assert_eq!( + schedule.dependency, + [ + // e0 -> e1 + (ScheduleIndex::System(8), ScheduleIndex::System(9)), + ] + ); + assert_eq!( + schedule.components, + [ + simple_component("MyComponent<1>"), + simple_component("MyComponent<2>"), + simple_component("MyComponent<3>"), + ] + ); + assert_eq!( + schedule.conflicts, + [ + conflict(2, 3, AccessConflict::Components(vec![0])), + conflict(4, 5, AccessConflict::Components(vec![1, 2])) + ] + ); + } +} diff --git a/crates/bevy_ecs/src/schedule/auto_insert_apply_deferred.rs b/crates/bevy_ecs/src/schedule/auto_insert_apply_deferred.rs index da9d24dd536ff..e95869524e9e5 100644 --- a/crates/bevy_ecs/src/schedule/auto_insert_apply_deferred.rs +++ b/crates/bevy_ecs/src/schedule/auto_insert_apply_deferred.rs @@ -1,10 +1,10 @@ -use alloc::{boxed::Box, collections::BTreeSet, vec::Vec}; +use alloc::{borrow::ToOwned, boxed::Box, collections::BTreeSet, vec::Vec}; use bevy_platform::{collections::HashMap, hash::FixedHasher}; use indexmap::IndexSet; use crate::{ - schedule::{graph::Dag, SystemKey, SystemSetKey}, + schedule::{FlattenedDependencies, SystemKey, SystemSetKey}, system::{IntoSystem, System}, world::World, }; @@ -73,12 +73,13 @@ impl ScheduleBuildPass for AutoInsertApplyDeferredPass { &mut self, _world: &mut World, graph: &mut ScheduleGraph, - dependency_flattened: &mut Dag, + mut dependency_flattened: FlattenedDependencies<'_>, ) -> Result<(), ScheduleBuildError> { - let mut sync_point_graph = dependency_flattened.graph().clone(); let (topo, flat_dependency) = dependency_flattened .toposort_and_graph() .map_err(ScheduleBuildError::FlatDependencySort)?; + let topo = topo.to_owned(); + let flat_dependency = flat_dependency.to_owned(); fn set_has_conditions(graph: &ScheduleGraph, set: SystemSetKey) -> bool { graph.system_sets.has_conditions(set) @@ -125,7 +126,7 @@ impl ScheduleBuildPass for AutoInsertApplyDeferredPass { let mut 
distance_to_explicit_sync_node: HashMap<u32, SystemKey> = HashMap::default(); // Determine the distance for every node and collect the explicit sync points. - for &key in topo { + for &key in topo.iter() { let (node_distance, mut node_needs_sync) = distances_and_pending_sync .get(&key) .copied() @@ -180,7 +181,7 @@ impl ScheduleBuildPass for AutoInsertApplyDeferredPass { // Find any edges which have a different number of sync points between them and make sure // there is a sync point between them. - for &key in topo { + for &key in topo.iter() { let (node_distance, _) = distances_and_pending_sync .get(&key) .copied() @@ -208,15 +209,13 @@ impl ScheduleBuildPass for AutoInsertApplyDeferredPass { .copied() .unwrap_or_else(|| self.get_sync_point(graph, target_distance)); - sync_point_graph.add_edge(key, sync_point); - sync_point_graph.add_edge(sync_point, target); + dependency_flattened.add_edge(key, sync_point); + dependency_flattened.add_edge(sync_point, target); // The edge without the sync point is now redundant. - sync_point_graph.remove_edge(key, target); + dependency_flattened.remove_edge(key, target); } } - - **dependency_flattened = sync_point_graph; Ok(()) } diff --git a/crates/bevy_ecs/src/schedule/mod.rs b/crates/bevy_ecs/src/schedule/mod.rs index bc8af8d4867e0..d50442c9bb87f 100644 --- a/crates/bevy_ecs/src/schedule/mod.rs +++ b/crates/bevy_ecs/src/schedule/mod.rs @@ -13,7 +13,7 @@ mod stepping; pub use self::graph::GraphInfo; pub use self::{condition::*, config::*, error::*, executor::*, node::*, schedule::*, set::*}; -pub use pass::ScheduleBuildPass; +pub use pass::{FlattenedDependencies, ScheduleBuildPass}; /// An implementation of a graph data structure.
pub mod graph; diff --git a/crates/bevy_ecs/src/schedule/pass.rs b/crates/bevy_ecs/src/schedule/pass.rs index 833f98ea0f32d..d0d42e0e98b4a 100644 --- a/crates/bevy_ecs/src/schedule/pass.rs +++ b/crates/bevy_ecs/src/schedule/pass.rs @@ -2,15 +2,19 @@ use alloc::{boxed::Box, vec::Vec}; use core::{ any::{Any, TypeId}, fmt::Debug, + ops::Deref, }; -use bevy_platform::hash::FixedHasher; +use bevy_platform::{collections::HashSet, hash::FixedHasher}; use bevy_utils::TypeIdMap; use indexmap::IndexSet; use super::{DiGraph, NodeId, ScheduleBuildError, ScheduleGraph}; use crate::{ - schedule::{graph::Dag, SystemKey, SystemSetKey}, + schedule::{ + graph::{Dag, DagAnalysis, DiGraphToposortError}, + SystemKey, SystemSetKey, + }, world::World, }; @@ -37,17 +41,83 @@ pub trait ScheduleBuildPass: Send + Sync + Debug + 'static { &mut self, world: &mut World, graph: &mut ScheduleGraph, - dependency_flattened: &mut Dag, + dependency_flattened: FlattenedDependencies<'_>, ) -> Result<(), ScheduleBuildError>; } +/// A wrapper around the directed, acyclic graph of system edges. +/// +/// This allows tracking mutations to the graph for recording build pass changes. +pub struct FlattenedDependencies<'a> { + /// The graph of dependency edges. + pub(crate) dag: &'a mut Dag, + /// The edges that have been added by build passes. + pub(crate) added_edges: &'a mut HashSet<(SystemKey, SystemKey)>, +} + +impl Deref for FlattenedDependencies<'_> { + type Target = Dag; + + fn deref(&self) -> &Self::Target { + self.dag + } +} + +impl FlattenedDependencies<'_> { + /// Adds an edge to the dependencies such that `system_1` runs before `system_2`. + pub fn add_edge(&mut self, system_1: SystemKey, system_2: SystemKey) { + self.dag.add_edge(system_1, system_2); + self.added_edges.insert((system_1, system_2)); + } + + /// Removes an edge going from `system_1` to `system_2` in the dependencies. 
+ /// + /// This should be used with caution - removing edges this way can lead to **very** surprising + /// behavior. However, this function can be used to remove dependencies that are made redundant + /// by added edges. + /// + /// Note: these edges are **not** reported like the added edges are. + pub fn remove_edge(&mut self, system_1: SystemKey, system_2: SystemKey) { + self.dag.remove_edge(system_1, system_2); + // We intentionally don't record edges (like `self.added_edges`) because it's unlikely that + // users call this for anything other than redundant edges, and because these redundant + // edges are actually important. It would be confusing if a visualizer omitted the removed + // edges, since an edge you add in your plugin may not appear in the visualizer due to being + // removed! + } + + /// Returns a topological ordering of the graph, computing it if the graph is dirty. + /// + /// This function matches [`Dag::toposort`]. + pub fn toposort(&mut self) -> Result<&[SystemKey], DiGraphToposortError> { + self.dag.toposort() + } + + /// Returns both the topological ordering and the underlying graph, computing the toposort if + /// the graph is dirty. + /// + /// This function matches [`Dag::toposort_and_graph`]. + pub fn toposort_and_graph( + &mut self, + ) -> Result<(&[SystemKey], &DiGraph<SystemKey>), DiGraphToposortError> { + self.dag.toposort_and_graph() + } + + /// Processes the DAG and computes various properties about it. + /// + /// This function matches [`Dag::analyze`]. + pub fn analyze(&mut self) -> Result<DagAnalysis<SystemKey>, DiGraphToposortError> { + self.dag.analyze() + } +} + /// Object safe version of [`ScheduleBuildPass`].
pub(super) trait ScheduleBuildPassObj: Send + Sync + Debug { fn build( &mut self, world: &mut World, graph: &mut ScheduleGraph, - dependency_flattened: &mut Dag, + dependency_flattened: FlattenedDependencies<'_>, ) -> Result<(), ScheduleBuildError>; fn collapse_set( @@ -65,7 +135,7 @@ impl ScheduleBuildPassObj for T { &mut self, world: &mut World, graph: &mut ScheduleGraph, - dependency_flattened: &mut Dag, + dependency_flattened: FlattenedDependencies<'_>, ) -> Result<(), ScheduleBuildError> { self.build(world, graph, dependency_flattened) } diff --git a/crates/bevy_ecs/src/schedule/schedule.rs b/crates/bevy_ecs/src/schedule/schedule.rs index fc8f958bf865b..6896e1102f3a7 100644 --- a/crates/bevy_ecs/src/schedule/schedule.rs +++ b/crates/bevy_ecs/src/schedule/schedule.rs @@ -10,6 +10,7 @@ use alloc::{ vec, vec::Vec, }; +use bevy_ecs_macros::Event; use bevy_platform::{ collections::{HashMap, HashSet}, hash::FixedHasher, @@ -384,7 +385,6 @@ pub struct Schedule { executable: SystemSchedule, executor: Box<dyn SystemExecutor>, executor_initialized: bool, - warnings: Vec<ScheduleBuildWarning>, } #[derive(ScheduleLabel, Hash, PartialEq, Eq, Debug, Clone)] @@ -409,7 +409,6 @@ impl Schedule { executable: SystemSchedule::new(), executor: default_executor(), executor_initialized: false, - warnings: Vec::new(), }; // Call `set_build_settings` to add any default build passes this.set_build_settings(Default::default()); @@ -591,22 +590,35 @@ impl Schedule { /// Initializes any newly-added systems and conditions, rebuilds the executable schedule, /// and re-initializes the executor. /// - /// Moves all systems and run conditions out of the [`ScheduleGraph`]. - pub fn initialize(&mut self, world: &mut World) -> Result<(), ScheduleBuildError> { + /// Moves all systems and run conditions out of the [`ScheduleGraph`]. If the schedule is built + /// successfully, returns [`Some`] with the metadata. If the schedule has previously been built + /// successfully, returns [`None`].
+ pub fn initialize( + &mut self, + world: &mut World, + ) -> Result<Option<ScheduleBuildMetadata>, ScheduleBuildError> { + let mut build_metadata = None; if self.graph.changed { self.graph.initialize(world); let ignored_ambiguities = world .get_resource_or_init::<Schedules>() .ignored_scheduling_ambiguities .clone(); - self.warnings = self.graph.update_schedule( - world, - &mut self.executable, - &ignored_ambiguities, - self.label, - )?; + + let mut event = ScheduleBuilt { + label: self.label, + build_metadata: self.graph.update_schedule( + world, + &mut self.executable, + &ignored_ambiguities, + self.label, + )?, + }; self.graph.changed = false; self.executor_initialized = false; + + world.trigger_ref(&mut event); + build_metadata = Some(event.build_metadata); } if !self.executor_initialized { @@ -614,7 +626,7 @@ impl Schedule { self.executor_initialized = true; } - Ok(()) + Ok(build_metadata) } /// Returns the [`ScheduleGraph`]. @@ -699,12 +711,6 @@ impl Schedule { self.executable.systems.len() } } - - /// Returns warnings that were generated during the last call to - /// [`Schedule::initialize`]. - pub fn warnings(&self) -> &[ScheduleBuildWarning] { - &self.warnings - } } /// Metadata for a [`Schedule`]. @@ -1145,7 +1151,7 @@ impl ScheduleGraph { &mut self, world: &mut World, ignored_ambiguities: &BTreeSet<ComponentId>, - ) -> Result<(SystemSchedule, Vec<ScheduleBuildWarning>), ScheduleBuildError> { + ) -> Result<(SystemSchedule, ScheduleBuildMetadata), ScheduleBuildError> { let mut warnings = Vec::new(); // Check system set memberships for cycles. @@ -1205,8 +1211,16 @@ impl ScheduleGraph { // Allow modification of the schedule graph by build passes.
let mut passes = core::mem::take(&mut self.passes); + let mut added_edges = Default::default(); for pass in passes.values_mut() { - pass.build(world, self, &mut flat_dependency)?; + pass.build( + world, + self, + FlattenedDependencies { + dag: &mut flat_dependency, + added_edges: &mut added_edges, + }, + )?; } self.passes = passes; @@ -1244,7 +1258,10 @@ impl ScheduleGraph { // build the schedule Ok(( self.build_schedule_inner(flat_dependency, hierarchy_analysis), - warnings, + ScheduleBuildMetadata { + warnings, + edges_added_by_build_passes: added_edges, + }, )) } @@ -1350,7 +1367,7 @@ impl ScheduleGraph { schedule: &mut SystemSchedule, ignored_ambiguities: &BTreeSet<ComponentId>, schedule_label: InternedScheduleLabel, - ) -> Result<Vec<ScheduleBuildWarning>, ScheduleBuildError> { + ) -> Result<ScheduleBuildMetadata, ScheduleBuildError> { if !self.systems.is_initialized() || !self.system_sets.is_initialized() { return Err(ScheduleBuildError::Uninitialized); } @@ -1381,10 +1398,10 @@ impl ScheduleGraph { } } - let (new_schedule, warnings) = self.build_schedule(world, ignored_ambiguities)?; + let (new_schedule, build_metadata) = self.build_schedule(world, ignored_ambiguities)?; *schedule = new_schedule; - for warning in &warnings { + for warning in &build_metadata.warnings { warn!( "{:?} schedule built successfully, however: {}", schedule_label, @@ -1405,7 +1422,7 @@ impl ScheduleGraph { schedule.set_conditions.push(conditions); } - Ok(warnings) + Ok(build_metadata) } } @@ -1611,6 +1628,30 @@ impl ScheduleBuildSettings { } } +/// Metadata about the schedule build process. +pub struct ScheduleBuildMetadata { + /// Warnings about the schedule graph detected by the build process. + pub warnings: Vec<ScheduleBuildWarning>, + /// Edges added by [`ScheduleBuildPass`]es. + /// + /// These edges are not stored in the [`ScheduleGraph`], and so are only available during the + /// build process. + pub edges_added_by_build_passes: HashSet<(SystemKey, SystemKey)>, +} + +/// An event triggered when a schedule is successfully built.
+/// +/// Note: When this event is triggered, the corresponding [`Schedule`] is not present in the world. +/// So, observers will need to cache whatever data they need from this and access it later once the +/// schedule is not running. +#[derive(Event)] +pub struct ScheduleBuilt { + /// The schedule that was built. + pub label: InternedScheduleLabel, + /// The metadata for the build process of this schedule. + pub build_metadata: ScheduleBuildMetadata, +} + /// Error to denote that [`Schedule::initialize`] or [`Schedule::run`] has not yet been called for /// this schedule. #[derive(Error, Debug)] @@ -1628,8 +1669,9 @@ mod tests { error::{ignore, panic, DefaultErrorHandler, Result}, prelude::{ApplyDeferred, IntoSystemSet, Res, Resource}, schedule::{ - passes::AutoInsertApplyDeferredPass, tests::ResMut, IntoScheduleConfigs, Schedule, - ScheduleBuildPass, ScheduleBuildSettings, ScheduleCleanupPolicy, SystemSet, + passes::AutoInsertApplyDeferredPass, tests::ResMut, FlattenedDependencies, + IntoScheduleConfigs, Schedule, ScheduleBuildPass, ScheduleBuildSettings, + ScheduleCleanupPolicy, SystemSet, }, system::Commands, world::World, @@ -2626,7 +2668,7 @@ mod tests { &mut self, _world: &mut World, _graph: &mut super::ScheduleGraph, - _dependency_flattened: &mut crate::schedule::graph::Dag, + _dependency_flattened: FlattenedDependencies<'_>, ) -> core::result::Result<(), crate::schedule::ScheduleBuildError> { Ok(()) } diff --git a/crates/bevy_internal/Cargo.toml b/crates/bevy_internal/Cargo.toml index 1b7c07d92e602..751843a0b33b0 100644 --- a/crates/bevy_internal/Cargo.toml +++ b/crates/bevy_internal/Cargo.toml @@ -460,6 +460,7 @@ hotpatching = ["bevy_app/hotpatching", "bevy_ecs/hotpatching"] debug = ["bevy_utils/debug", "bevy_ecs/debug", "bevy_render?/debug"] screenrecording = ["bevy_dev_tools/screenrecording"] +schedule_data = ["bevy_dev_tools/schedule_data"] [dependencies] # bevy (no_std) diff --git a/docs/cargo_features.md b/docs/cargo_features.md index 
30b19f33a1ef2..ddca168074618 100644 --- a/docs/cargo_features.md +++ b/docs/cargo_features.md @@ -168,6 +168,7 @@ This is the complete `bevy` cargo feature list, without "profiles" or "collectio |reflect_auto_register_static|Enable automatic reflect registration without inventory. See `reflect::load_type_registrations` for more info.| |reflect_documentation|Enables bevy_reflect to access documentation comments of rust code at runtime| |reflect_functions|Enable function reflection| +|schedule_data|Enable collecting schedule data from the app.| |serialize|Enable serialization support through serde| |shader_format_glsl|Enable support for shaders in GLSL| |shader_format_spirv|Enable support for shaders in SPIR-V| diff --git a/examples/README.md b/examples/README.md index 399e7de23ed02..43caf4cc7a9ec 100644 --- a/examples/README.md +++ b/examples/README.md @@ -310,6 +310,7 @@ Example | Description Example | Description --- | --- +[Extract Schedule Data](../examples/dev_tools/schedule_data.rs) | Extracts the schedule data from a default app and writes it to a file [FPS overlay](../examples/dev_tools/fps_overlay.rs) | Demonstrates FPS overlay [Infinite grid](../examples/dev_tools/infinite_grid.rs) | Demonstrates Bevy's infinite grid, suitable as a ground plane for editors diff --git a/examples/dev_tools/schedule_data.rs b/examples/dev_tools/schedule_data.rs new file mode 100644 index 0000000000000..f51a6a03d4a60 --- /dev/null +++ b/examples/dev_tools/schedule_data.rs @@ -0,0 +1,14 @@ +//! This example demonstrates how to automatically serialize schedule data. + +use bevy::{dev_tools::schedule_data::plugin::*, prelude::*}; + +fn main() { + App::new() + .add_plugins((DefaultPlugins, SerializeSchedulesPlugin::default())) + // This resource is only necessary to put the output in a nice spot for the example code. + // By default, this lands at "/app_data.ron". + .insert_resource(SerializeSchedulesFilePath( + "examples/dev_tools/app_data.ron".into(), + )) + .run(); +}