bevy_render/render_resource/specializer.rs
use super::{
    CachedComputePipelineId, CachedRenderPipelineId, ComputePipeline, ComputePipelineDescriptor,
    PipelineCache, RenderPipeline, RenderPipelineDescriptor,
};
use bevy_ecs::error::BevyError;
use bevy_platform::{
    collections::{
        hash_map::{Entry, VacantEntry},
        HashMap,
    },
    hash::FixedHasher,
};
use core::{hash::Hash, marker::PhantomData};
use tracing::error;
use variadics_please::all_tuples;

pub use bevy_render_macros::{Specializer, SpecializerKey};

/// Defines a type that is able to be "specialized" and cached by creating and transforming
/// its descriptor type. This is implemented for [`RenderPipeline`] and [`ComputePipeline`], and
/// likely will not have much utility for other types.
///
/// See docs on [`Specializer`] for more info.
pub trait Specializable {
    type Descriptor: PartialEq + Clone + Send + Sync;
    type CachedId: Clone + Send + Sync;
    fn queue(pipeline_cache: &PipelineCache, descriptor: Self::Descriptor) -> Self::CachedId;
    fn get_descriptor(pipeline_cache: &PipelineCache, id: Self::CachedId) -> &Self::Descriptor;
}

impl Specializable for RenderPipeline {
    type Descriptor = RenderPipelineDescriptor;
    type CachedId = CachedRenderPipelineId;

    fn queue(pipeline_cache: &PipelineCache, descriptor: Self::Descriptor) -> Self::CachedId {
        pipeline_cache.queue_render_pipeline(descriptor)
    }

    fn get_descriptor(
        pipeline_cache: &PipelineCache,
        id: CachedRenderPipelineId,
    ) -> &Self::Descriptor {
        pipeline_cache.get_render_pipeline_descriptor(id)
    }
}

impl Specializable for ComputePipeline {
    type Descriptor = ComputePipelineDescriptor;

    type CachedId = CachedComputePipelineId;

    fn queue(pipeline_cache: &PipelineCache, descriptor: Self::Descriptor) -> Self::CachedId {
        pipeline_cache.queue_compute_pipeline(descriptor)
    }

    fn get_descriptor(
        pipeline_cache: &PipelineCache,
        id: CachedComputePipelineId,
    ) -> &Self::Descriptor {
        pipeline_cache.get_compute_pipeline_descriptor(id)
    }
}

/// Defines a type capable of "specializing" values of a type `T`.
///
/// Specialization is the process of generating variants of a type `T`
/// from small hashable keys, and specializers themselves can be
/// thought of as [pure functions] from the key type to `T` that
/// [memoize] their results based on the key.
///
/// <div class="warning">
/// Because specialization is designed for use with render and compute
/// pipelines, specializers act on <i>descriptors</i> of <code>T</code> rather
/// than produce <code>T</code> itself, but the above comparison is still valid.
/// </div>
///
/// Since compiling render and compute pipelines can be so slow,
/// specialization allows a Bevy app to detect when it would compile
/// a duplicate pipeline and reuse what's already in the cache. While
/// pipelines could all be memoized by hashing each whole descriptor, this
/// would be much slower and could still create duplicates. In contrast,
/// memoizing groups of *related* pipelines based on a small hashable
/// key is much faster. See the docs on [`SpecializerKey`] for more info.
///
/// ## Composing Specializers
///
/// This trait can be derived with `#[derive(Specializer)]` for structs whose
/// fields all implement [`Specializer`]. This allows composing multiple
/// specializers together, and makes it much easier to encapsulate and separate
/// concerns between specializers. One could make individual specializers
/// for common operations, place them in entirely separate modules, and then
/// compose them together with a single `#[derive]`:
///
/// ```rust
/// # use bevy_ecs::error::BevyError;
/// # use bevy_render::render_resource::Specializer;
/// # use bevy_render::render_resource::SpecializerKey;
/// # use bevy_render::render_resource::RenderPipeline;
/// # use bevy_render::render_resource::RenderPipelineDescriptor;
/// struct A;
/// struct B;
/// #[derive(Copy, Clone, PartialEq, Eq, Hash, SpecializerKey)]
/// struct BKey { contrived_number: u32 };
///
/// impl Specializer<RenderPipeline> for A {
///     type Key = ();
///
///     fn specialize(
///         &self,
///         key: (),
///         descriptor: &mut RenderPipelineDescriptor
///     ) -> Result<(), BevyError> {
///         # let _ = descriptor;
///         // mutate the descriptor here
///         Ok(key)
///     }
/// }
///
/// impl Specializer<RenderPipeline> for B {
///     type Key = BKey;
///
///     fn specialize(
///         &self,
///         key: BKey,
///         descriptor: &mut RenderPipelineDescriptor
///     ) -> Result<BKey, BevyError> {
///         # let _ = descriptor;
///         // mutate the descriptor here
///         Ok(key)
///     }
/// }
///
/// #[derive(Specializer)]
/// #[specialize(RenderPipeline)]
/// struct C {
///     #[key(default)]
///     a: A,
///     b: B,
/// }
///
/// /*
/// The generated implementation:
/// impl Specializer<RenderPipeline> for C {
///     type Key = BKey;
///     fn specialize(
///         &self,
///         key: Self::Key,
///         descriptor: &mut RenderPipelineDescriptor
///     ) -> Result<Canonical<Self::Key>, BevyError> {
///         let _ = self.a.specialize((), descriptor)?;
///         let key = self.b.specialize(key, descriptor)?;
///         Ok(key)
///     }
/// }
/// */
/// ```
///
/// The key type for a composed specializer will be a tuple of the keys
/// of each field, and their specialization logic will be applied in field
/// order. Since derive macros can't have generic parameters, the derive macro
/// requires an additional `#[specialize(..targets)]` attribute to specify a
/// list of types to target for the implementation. `#[specialize(all)]` is
/// also allowed, and will generate a fully generic implementation at the cost
/// of slightly worse error messages.
///
/// Additionally, each field can optionally take a `#[key]` attribute to
/// specify a "key override". This will hide that field's key from being
/// exposed by the wrapper, and always use the value given by the attribute.
/// Values for this attribute may either be `default`, which will use the key's
/// [`Default`] implementation, or a valid Rust expression of the key type.
///
/// [pure functions]: https://en.wikipedia.org/wiki/Pure_function
/// [memoize]: https://en.wikipedia.org/wiki/Memoization
pub trait Specializer<T: Specializable>: Send + Sync + 'static {
    type Key: SpecializerKey;
    fn specialize(
        &self,
        key: Self::Key,
        descriptor: &mut T::Descriptor,
    ) -> Result<Canonical<Self::Key>, BevyError>;
}

// TODO: update docs for `SpecializerKey` with a more concrete example
// once we've migrated mesh layout specialization

/// Defines a type that is able to be used as a key for [`Specializer`]s.
///
/// <div class="warning">
/// <strong>Most types should implement this trait with the included derive macro.</strong> <br/>
/// This generates a "canonical" key type, with <code>IS_CANONICAL = true</code> and <code>Canonical = Self</code>.
/// </div>
///
/// ## What's a "canonical" key?
///
/// The specialization API memoizes pipelines based on the hash of each key, but this
/// can still produce duplicates. For example, if one used a list of vertex attributes
/// as a key, the same attributes could appear in any order. Each ordering would count
/// as a "different" key, yet they would all produce the same pipeline.
///
/// To address this, during specialization keys are processed into a [canonical]
/// (or "standard") form that represents the actual descriptor that was produced.
/// In the previous example, that would be the final `VertexBufferLayout` contained
/// by the pipeline descriptor. This new key is used by [`Variants`] to
/// perform additional checks for duplicates, but only if required. If a key is
/// canonical from the start, then there's no need.
///
/// For implementors: the main property of a canonical key is that if two keys hash
/// differently, they should nearly always produce different descriptors.
///
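/// For illustration, a hypothetical non-canonical key might look like the
/// following sketch, where the canonical form is simply the sorted feature list:
///
/// ```rust
/// # use bevy_render::render_resource::SpecializerKey;
/// // A list of enabled shader features, in arbitrary order. Two lists with the
/// // same features in different orders hash differently even though they would
/// // produce the same descriptor, so this key is *not* canonical.
/// #[derive(Clone, PartialEq, Eq, Hash)]
/// struct FeatureList(Vec<&'static str>);
///
/// impl SpecializerKey for FeatureList {
///     const IS_CANONICAL: bool = false;
///     // the canonical form: the same features, sorted into a standard order
///     type Canonical = Vec<&'static str>;
/// }
/// ```
///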
/// [canonical]: https://en.wikipedia.org/wiki/Canonicalization
pub trait SpecializerKey: Clone + Hash + Eq {
    /// Denotes whether this key is canonical or not. This should be `true`
    /// if and only if `Canonical = Self`.
    const IS_CANONICAL: bool;

    /// The canonical key type to convert this into during specialization.
    type Canonical: Hash + Eq;
}

pub type Canonical<T> = <T as SpecializerKey>::Canonical;

impl<T: Specializable> Specializer<T> for () {
    type Key = ();

    fn specialize(
        &self,
        _key: Self::Key,
        _descriptor: &mut T::Descriptor,
    ) -> Result<(), BevyError> {
        Ok(())
    }
}

impl<T: Specializable, V: Send + Sync + 'static> Specializer<T> for PhantomData<V> {
    type Key = ();

    fn specialize(
        &self,
        _key: Self::Key,
        _descriptor: &mut T::Descriptor,
    ) -> Result<(), BevyError> {
        Ok(())
    }
}

macro_rules! impl_specialization_key_tuple {
    ($(#[$meta:meta])* $($T:ident),*) => {
        $(#[$meta])*
        impl <$($T: SpecializerKey),*> SpecializerKey for ($($T,)*) {
            const IS_CANONICAL: bool = true $(&& <$T as SpecializerKey>::IS_CANONICAL)*;
            type Canonical = ($(Canonical<$T>,)*);
        }
    };
}

all_tuples!(
    #[doc(fake_variadic)]
    impl_specialization_key_tuple,
    0,
    12,
    T
);

/// A cache for variants of a resource type created by a specializer.
/// At most one resource will be created for each key.
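///
/// A short usage sketch; `MySpecializer` and `MyKey` below are hypothetical
/// stand-ins for a real specializer and its key type:
///
/// ```rust
/// # use bevy_ecs::error::BevyError;
/// # use bevy_render::render_resource::*;
/// # #[derive(Copy, Clone, PartialEq, Eq, Hash, SpecializerKey)]
/// # struct MyKey { msaa_samples: u32 }
/// # struct MySpecializer;
/// # impl Specializer<RenderPipeline> for MySpecializer {
/// #     type Key = MyKey;
/// #     fn specialize(
/// #         &self,
/// #         key: MyKey,
/// #         _descriptor: &mut RenderPipelineDescriptor,
/// #     ) -> Result<MyKey, BevyError> {
/// #         Ok(key)
/// #     }
/// # }
/// fn queue_variant(
///     variants: &mut Variants<RenderPipeline, MySpecializer>,
///     pipeline_cache: &PipelineCache,
///     key: MyKey,
/// ) -> Result<CachedRenderPipelineId, BevyError> {
///     // reuses the cached pipeline for this key if one exists; otherwise
///     // specializes the base descriptor and queues a new pipeline
///     variants.specialize(pipeline_cache, key)
/// }
/// ```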
pub struct Variants<T: Specializable, S: Specializer<T>> {
    specializer: S,
    base_descriptor: T::Descriptor,
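    // `primary_cache` maps each key it has seen to a cached id, while
    // `secondary_cache` maps *canonical* keys to cached ids in order to detect
    // duplicates produced by non-canonical keys.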
    primary_cache: HashMap<S::Key, T::CachedId>,
    secondary_cache: HashMap<Canonical<S::Key>, T::CachedId>,
}

impl<T: Specializable, S: Specializer<T>> Variants<T, S> {
    /// Creates a new [`Variants`] from a [`Specializer`] and a base descriptor.
    #[inline]
    pub fn new(specializer: S, base_descriptor: T::Descriptor) -> Self {
        Self {
            specializer,
            base_descriptor,
            primary_cache: Default::default(),
            secondary_cache: Default::default(),
        }
    }

    /// Specializes a resource given a key of the [`Specializer`]'s key type,
    /// reusing the cached variant if one already exists for that key.
    #[inline]
    pub fn specialize(
        &mut self,
        pipeline_cache: &PipelineCache,
        key: S::Key,
    ) -> Result<T::CachedId, BevyError> {
        let entry = self.primary_cache.entry(key.clone());
        match entry {
            Entry::Occupied(entry) => Ok(entry.get().clone()),
            Entry::Vacant(entry) => Self::specialize_slow(
                &self.specializer,
                self.base_descriptor.clone(),
                pipeline_cache,
                key,
                entry,
                &mut self.secondary_cache,
            ),
        }
    }

    #[cold]
    fn specialize_slow(
        specializer: &S,
        base_descriptor: T::Descriptor,
        pipeline_cache: &PipelineCache,
        key: S::Key,
        primary_entry: VacantEntry<S::Key, T::CachedId, FixedHasher>,
        secondary_cache: &mut HashMap<Canonical<S::Key>, T::CachedId>,
    ) -> Result<T::CachedId, BevyError> {
        let mut descriptor = base_descriptor.clone();
        let canonical_key = specializer.specialize(key.clone(), &mut descriptor)?;

        // if the whole key is canonical, the secondary cache isn't needed.
        if <S::Key as SpecializerKey>::IS_CANONICAL {
            return Ok(primary_entry
                .insert(<T as Specializable>::queue(pipeline_cache, descriptor))
                .clone());
        }

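        // the key wasn't canonical: look up the canonical form in the secondary
        // cache to see whether an equivalent pipeline has already been queued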
        let id = match secondary_cache.entry(canonical_key) {
            Entry::Occupied(entry) => {
                if cfg!(debug_assertions) {
                    let stored_descriptor =
                        <T as Specializable>::get_descriptor(pipeline_cache, entry.get().clone());
                    if &descriptor != stored_descriptor {
                        error!(
                            "Invalid Specializer<{}> impl for {}: the cached descriptor \
                            is not equal to the generated descriptor for the given key. \
                            This means the Specializer uses information from the key that \
                            is not captured by its canonical form when specializing the \
                            pipeline. This is not allowed because it would invalidate the cache.",
                            core::any::type_name::<T>(),
                            core::any::type_name::<S>()
                        );
                    }
                }
                entry.into_mut().clone()
            }
            Entry::Vacant(entry) => entry
                .insert(<T as Specializable>::queue(pipeline_cache, descriptor))
                .clone(),
        };

        primary_entry.insert(id.clone());
        Ok(id)
    }
}