// bevy_ecs/schedule/executor/single_threaded.rs
#[cfg(feature = "trace")]
use bevy_utils::tracing::info_span;
use core::panic::AssertUnwindSafe;
use fixedbitset::FixedBitSet;
use crate::{
schedule::{is_apply_deferred, BoxedCondition, ExecutorKind, SystemExecutor, SystemSchedule},
world::World,
};
use super::__rust_begin_short_backtrace;
/// Runs the schedule on a single thread, executing systems one at a time in
/// schedule order.
#[derive(Default)]
pub struct SingleThreadedExecutor {
    /// System sets whose run conditions have already been evaluated during
    /// the current `run` pass (conditions are evaluated at most once per pass).
    evaluated_sets: FixedBitSet,
    /// Systems that have either run or been skipped during the current pass.
    completed_systems: FixedBitSet,
    /// Systems that have run but whose deferred buffers have not yet been
    /// applied to the world.
    unapplied_systems: FixedBitSet,
    /// Whether remaining deferred buffers are applied at the end of `run`.
    /// Set to `true` by `new`; toggled via `set_apply_final_deferred`.
    apply_final_deferred: bool,
}
impl SystemExecutor for SingleThreadedExecutor {
    fn kind(&self) -> ExecutorKind {
        ExecutorKind::SingleThreaded
    }

    /// Sizes the bookkeeping bitsets to match the number of systems and
    /// system sets in `schedule`.
    fn init(&mut self, schedule: &SystemSchedule) {
        let sys_count = schedule.system_ids.len();
        let set_count = schedule.set_ids.len();
        self.evaluated_sets = FixedBitSet::with_capacity(set_count);
        self.completed_systems = FixedBitSet::with_capacity(sys_count);
        self.unapplied_systems = FixedBitSet::with_capacity(sys_count);
    }

    /// Runs every system in `schedule` in order, evaluating set and system
    /// run conditions as it goes. Panics inside a system are reported with
    /// the system's name and then re-raised.
    fn run(
        &mut self,
        schedule: &mut SystemSchedule,
        world: &mut World,
        _skip_systems: Option<&FixedBitSet>,
    ) {
        // Systems skipped by the stepping feature are marked completed up
        // front so they never run this pass.
        #[cfg(feature = "bevy_debug_stepping")]
        if let Some(skipped_systems) = _skip_systems {
            self.completed_systems |= skipped_systems;
        }
        for system_index in 0..schedule.systems.len() {
            #[cfg(feature = "trace")]
            let name = schedule.systems[system_index].name();
            #[cfg(feature = "trace")]
            let should_run_span = info_span!("check_conditions", name = &*name).entered();

            // Already-completed systems (e.g. skipped via a failed set
            // condition on an earlier iteration) must not run again.
            let mut should_run = !self.completed_systems.contains(system_index);
            // Evaluate the conditions of each set containing this system,
            // but only once per set per pass.
            for set_idx in schedule.sets_with_conditions_of_systems[system_index].ones() {
                if self.evaluated_sets.contains(set_idx) {
                    continue;
                }
                let set_conditions_met =
                    evaluate_and_fold_conditions(&mut schedule.set_conditions[set_idx], world);
                // A failed set condition skips every system in the set:
                // they are all marked completed without running.
                if !set_conditions_met {
                    self.completed_systems
                        .union_with(&schedule.systems_in_sets_with_conditions[set_idx]);
                }
                should_run &= set_conditions_met;
                self.evaluated_sets.insert(set_idx);
            }
            // The system's own conditions are evaluated unconditionally —
            // even when `should_run` is already false — so they are always
            // run exactly once per pass.
            let system_conditions_met =
                evaluate_and_fold_conditions(&mut schedule.system_conditions[system_index], world);
            should_run &= system_conditions_met;
            let system = &mut schedule.systems[system_index];
            if should_run {
                // Skip systems whose parameters cannot currently be
                // satisfied (e.g. a missing resource).
                let valid_params = system.validate_param(world);
                should_run &= valid_params;
            }
            #[cfg(feature = "trace")]
            should_run_span.exit();
            // A system counts as completed whether it ran or was skipped.
            self.completed_systems.insert(system_index);
            if !should_run {
                continue;
            }
            // `ApplyDeferred` is special-cased: instead of running, it
            // flushes the buffers of all previously-run systems.
            if is_apply_deferred(system) {
                self.apply_deferred(schedule, world);
                continue;
            }
            // Catch panics so the failing system can be named before the
            // panic is propagated to the caller.
            let res = std::panic::catch_unwind(AssertUnwindSafe(|| {
                if system.is_exclusive() {
                    __rust_begin_short_backtrace::run(&mut **system, world);
                } else {
                    // Non-exclusive systems run through the unsafe world
                    // cell after refreshing their archetype access.
                    let world = world.as_unsafe_world_cell();
                    system.update_archetype_component_access(world);
                    // SAFETY: this executor holds `&mut World` and runs
                    // systems one at a time, and the system's archetype
                    // component access was updated immediately above.
                    unsafe { __rust_begin_short_backtrace::run_unsafe(&mut **system, world) };
                }
            }));
            if let Err(payload) = res {
                eprintln!("Encountered a panic in system `{}`!", &*system.name());
                std::panic::resume_unwind(payload);
            }
            // The system ran; its deferred buffers still need applying.
            self.unapplied_systems.insert(system_index);
        }

        if self.apply_final_deferred {
            self.apply_deferred(schedule, world);
        }
        // Reset per-pass state. `unapplied_systems` is cleared inside
        // `apply_deferred`, not here.
        self.evaluated_sets.clear();
        self.completed_systems.clear();
    }

    fn set_apply_final_deferred(&mut self, apply_final_deferred: bool) {
        self.apply_final_deferred = apply_final_deferred;
    }
}
impl SingleThreadedExecutor {
    /// Creates a new executor with empty bookkeeping bitsets and final
    /// deferred application enabled (see `set_apply_final_deferred`).
    pub const fn new() -> Self {
        Self {
            evaluated_sets: FixedBitSet::new(),
            completed_systems: FixedBitSet::new(),
            unapplied_systems: FixedBitSet::new(),
            apply_final_deferred: true,
        }
    }

    /// Applies the deferred buffers of every system that has run since the
    /// last flush, then resets the tracking set.
    fn apply_deferred(&mut self, schedule: &mut SystemSchedule, world: &mut World) {
        for unapplied_index in self.unapplied_systems.ones() {
            schedule.systems[unapplied_index].apply_deferred(world);
        }
        self.unapplied_systems.clear();
    }
}
/// Evaluates every condition in `conditions` against `world`, returning
/// `true` only if all of them hold.
///
/// Deliberately does NOT short-circuit across conditions: each condition is
/// evaluated exactly once per call, even if an earlier one returned `false`.
fn evaluate_and_fold_conditions(conditions: &mut [BoxedCondition], world: &mut World) -> bool {
    let mut all_met = true;
    for condition in conditions.iter_mut() {
        // A condition whose parameters cannot be satisfied counts as unmet
        // and is not run.
        let met = condition.validate_param(world)
            && __rust_begin_short_backtrace::readonly_run(&mut **condition, world);
        all_met &= met;
    }
    all_met
}