bevy_ecs/batching.rs
//! Types for controlling batching behavior during parallel processing.

use core::ops::Range;

/// Dictates how a parallel operation chunks up large quantities
/// during iteration.
///
/// A parallel query will split large tables and archetypes into
/// chunks of at most a certain batch size. Similarly, a parallel event
/// reader will chunk up the remaining events.
///
/// By default, this batch size is automatically determined by dividing
/// the size of the largest matched archetype by the number
/// of threads (rounded up). This attempts to minimize the overhead of scheduling
/// tasks onto multiple threads, but assumes each entity has roughly the
/// same amount of work to be done, which may not hold true in every
/// workload.
///
/// See [`Query::par_iter`] and [`EventReader::par_read`] for more information.
///
/// [`Query::par_iter`]: crate::system::Query::par_iter
/// [`EventReader::par_read`]: crate::event::EventReader::par_read
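///
/// # Example
///
/// A minimal construction sketch, using only the builder methods defined below;
/// the import path assumes this file is exposed as the crate's `batching` module:
///
/// ```
/// # use bevy_ecs::batching::BatchingStrategy;
/// // Cap batches at 4096 items and split the work into two batches per thread.
/// let strategy = BatchingStrategy::new()
///     .max_batch_size(4096)
///     .batches_per_thread(2);
/// assert_eq!(strategy.batch_size_limits.end, 4096);
/// ```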
#[derive(Clone, Debug)]
pub struct BatchingStrategy {
    /// The lower and upper limits for a batch of entities.
    ///
    /// Setting the bounds to the same value will result in a fixed
    /// batch size.
    ///
    /// Defaults to `1..usize::MAX`.
    pub batch_size_limits: Range<usize>,
    /// The number of batches per thread in the [`ComputeTaskPool`].
    /// Increasing this value will decrease the batch size, which may
    /// increase the scheduling overhead for the iteration.
    ///
    /// Defaults to 1.
    ///
    /// [`ComputeTaskPool`]: bevy_tasks::ComputeTaskPool
    pub batches_per_thread: usize,
}

impl Default for BatchingStrategy {
    fn default() -> Self {
        Self::new()
    }
}

impl BatchingStrategy {
    /// Creates a new unconstrained default batching strategy.
    pub const fn new() -> Self {
        Self {
            batch_size_limits: 1..usize::MAX,
            batches_per_thread: 1,
        }
    }

    /// Declares a batching strategy with a fixed batch size.
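    ///
    /// With a fixed size, [`Self::calc_batch_size`] returns that size directly,
    /// as in this sketch (module path assumed to be `bevy_ecs::batching`):
    ///
    /// ```
    /// # use bevy_ecs::batching::BatchingStrategy;
    /// let strategy = BatchingStrategy::fixed(1024);
    /// // The item and thread counts do not affect a fixed batch size.
    /// assert_eq!(strategy.calc_batch_size(|| 1_000_000, 8), 1024);
    /// ```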
    pub const fn fixed(batch_size: usize) -> Self {
        Self {
            batch_size_limits: batch_size..batch_size,
            batches_per_thread: 1,
        }
    }

    /// Configures the minimum allowed batch size of this instance.
    pub const fn min_batch_size(mut self, batch_size: usize) -> Self {
        self.batch_size_limits.start = batch_size;
        self
    }

    /// Configures the maximum allowed batch size of this instance.
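    ///
    /// Together with [`Self::min_batch_size`], this clamps the computed batch
    /// size, as in this sketch (module path assumed to be `bevy_ecs::batching`):
    ///
    /// ```
    /// # use bevy_ecs::batching::BatchingStrategy;
    /// let strategy = BatchingStrategy::new().min_batch_size(100).max_batch_size(200);
    /// // 50 items over 4 threads would give batches of 13, clamped up to 100.
    /// assert_eq!(strategy.calc_batch_size(|| 50, 4), 100);
    /// ```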
    pub const fn max_batch_size(mut self, batch_size: usize) -> Self {
        self.batch_size_limits.end = batch_size;
        self
    }

    /// Configures the number of batches to assign to each thread for this instance.
    ///
    /// # Panics
    ///
    /// Panics if `batches_per_thread` is zero.
    pub fn batches_per_thread(mut self, batches_per_thread: usize) -> Self {
        assert!(
            batches_per_thread > 0,
            "The number of batches per thread must be non-zero."
        );
        self.batches_per_thread = batches_per_thread;
        self
    }

    /// Calculates the batch size according to the given thread count and max item count.
    /// The count is provided as a closure so that it is only computed if needed.
    ///
    /// # Panics
    ///
    /// Panics if `thread_count` is 0.
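    ///
    /// # Example
    ///
    /// A small worked sketch (module path assumed to be `bevy_ecs::batching`):
    ///
    /// ```
    /// # use bevy_ecs::batching::BatchingStrategy;
    /// let strategy = BatchingStrategy::new().batches_per_thread(2);
    /// // 8 threads * 2 batches per thread = 16 batches, and
    /// // 10_000 items divided into 16 batches (rounded up) is 625 items each.
    /// assert_eq!(strategy.calc_batch_size(|| 10_000, 8), 625);
    /// ```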
    #[inline]
    pub fn calc_batch_size(&self, max_items: impl FnOnce() -> usize, thread_count: usize) -> usize {
        // An empty range (start >= end) indicates a fixed batch size,
        // so use the start bound directly.
        if self.batch_size_limits.is_empty() {
            return self.batch_size_limits.start;
        }
        assert!(
            thread_count > 0,
            "Attempted to run parallel iteration with an empty TaskPool"
        );
        let batches = thread_count * self.batches_per_thread;
        // Divide the items evenly among the batches, rounding up.
        let batch_size = max_items().div_ceil(batches);
        batch_size.clamp(self.batch_size_limits.start, self.batch_size_limits.end)
    }
}
107}