bevy_pbr/material_bind_groups.rs

1//! Material bind group management for bindless resources.
2//!
3//! In bindless mode, Bevy's renderer groups materials into bind groups. This
4//! allocator manages each bind group, assigning slots to materials as
5//! appropriate.
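//!
//! Each bind group is identified by a [`MaterialBindGroupIndex`], and a
//! material's location within the allocator is a [`MaterialBindingId`]: a bind
//! group index plus a slot within that bind group (always slot 0 in
//! non-bindless mode).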
6
7use crate::Material;
8use bevy_derive::{Deref, DerefMut};
9use bevy_ecs::{
10    resource::Resource,
11    system::{Commands, Res},
12};
13use bevy_platform::collections::{HashMap, HashSet};
14use bevy_reflect::{prelude::ReflectDefault, Reflect};
15use bevy_render::render_resource::{BindlessSlabResourceLimit, PipelineCache};
16use bevy_render::{
17    render_resource::{
18        BindGroup, BindGroupEntry, BindGroupLayoutDescriptor, BindingNumber, BindingResource,
19        BindingResources, BindlessDescriptor, BindlessIndex, BindlessIndexTableDescriptor,
20        BindlessResourceType, Buffer, BufferBinding, BufferDescriptor, BufferId,
21        BufferInitDescriptor, BufferUsages, CompareFunction, FilterMode, OwnedBindingResource,
22        PreparedBindGroup, RawBufferVec, Sampler, SamplerDescriptor, SamplerId, TextureView,
23        TextureViewDimension, TextureViewId, UnpreparedBindGroup, WgpuSampler, WgpuTextureView,
24    },
25    renderer::{RenderDevice, RenderQueue},
26    settings::WgpuFeatures,
27    texture::FallbackImage,
28};
29use bevy_utils::{default, TypeIdMap};
30use bytemuck::Pod;
31use core::hash::Hash;
32use core::{cmp::Ordering, iter, mem, ops::Range};
33use tracing::{error, trace};
34
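/// A resource that stores the [`MaterialBindGroupAllocator`] for each material
/// type, keyed by the material's `TypeId`.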
35#[derive(Resource, Deref, DerefMut, Default)]
36pub struct MaterialBindGroupAllocators(TypeIdMap<MaterialBindGroupAllocator>);
37
38/// A resource that places materials into bind groups and tracks their
39/// resources.
40///
41/// Internally, Bevy has separate allocators for bindless and non-bindless
42/// materials. This resource provides a common interface to the specific
43/// allocator in use.
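///
/// A rough usage sketch (illustrative only; the names below are placeholders,
/// and in practice the renderer's material systems drive these calls):
///
/// ```ignore
/// let mut allocator = MaterialBindGroupAllocator::new(
///     &render_device,
///     "my_material",
///     bindless_descriptor, // `Some(_)` for bindless materials, `None` otherwise
///     bind_group_layout.clone(),
///     slab_capacity,
/// );
///
/// // Place one material's resources into a slab and remember where they landed.
/// let binding_id = allocator.allocate_unprepared(unprepared_bind_group, &bind_group_layout);
///
/// // Later, look up the slab to bind it for drawing, and free the material when done.
/// let slab = allocator.get(binding_id.group);
/// allocator.free(binding_id);
/// ```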
44pub enum MaterialBindGroupAllocator {
45    /// The allocator used when the material is bindless.
46    Bindless(Box<MaterialBindGroupBindlessAllocator>),
47    /// The allocator used when the material is non-bindless.
48    NonBindless(Box<MaterialBindGroupNonBindlessAllocator>),
49}
50
51/// The allocator that places bindless materials into bind groups and tracks
52/// their resources.
53pub struct MaterialBindGroupBindlessAllocator {
    /// The label to use for the buffers and bind groups that this allocator allocates.
55    label: &'static str,
56    /// The slabs, each of which contains a bind group.
57    slabs: Vec<MaterialBindlessSlab>,
58    /// The layout of the bind groups that we produce.
59    bind_group_layout: BindGroupLayoutDescriptor,
60    /// Information about the bindless resources in the material.
61    ///
62    /// We use this information to create and maintain bind groups.
63    bindless_descriptor: BindlessDescriptor,
64
65    /// Dummy buffers that we use to fill empty slots in buffer binding arrays.
66    ///
67    /// There's one fallback buffer for each buffer in the bind group, each
68    /// appropriately sized. Each buffer contains one uninitialized element of
69    /// the applicable type.
70    fallback_buffers: HashMap<BindlessIndex, Buffer>,
71
72    /// The maximum number of resources that can be stored in a slab.
73    ///
74    /// This corresponds to `SLAB_CAPACITY` in the `#[bindless(SLAB_CAPACITY)]`
75    /// attribute, when deriving `AsBindGroup`.
76    slab_capacity: u32,
77}
78
79/// A single bind group and the bookkeeping necessary to allocate into it.
80pub struct MaterialBindlessSlab {
81    /// The current bind group, if it's up to date.
82    ///
83    /// If this is `None`, then the bind group is dirty and needs to be
84    /// regenerated.
85    bind_group: Option<BindGroup>,
86
87    /// The GPU-accessible buffers that hold the mapping from binding index to
88    /// bindless slot.
89    ///
90    /// This is conventionally assigned to bind group binding 0, but it can be
91    /// changed using the `#[bindless(index_table(binding(B)))]` attribute on
92    /// `AsBindGroup`.
93    ///
    /// Because the slab binary searches this list, the tables within must be
    /// sorted by bindless index.
96    bindless_index_tables: Vec<MaterialBindlessIndexTable>,
97
98    /// The binding arrays containing samplers.
99    samplers: HashMap<BindlessResourceType, MaterialBindlessBindingArray<Sampler>>,
100    /// The binding arrays containing textures.
101    textures: HashMap<BindlessResourceType, MaterialBindlessBindingArray<TextureView>>,
102    /// The binding arrays containing buffers.
103    buffers: HashMap<BindlessIndex, MaterialBindlessBindingArray<Buffer>>,
104    /// The buffers that contain plain old data (i.e. the structure-level
105    /// `#[data]` attribute of `AsBindGroup`).
106    data_buffers: HashMap<BindlessIndex, MaterialDataBuffer>,
107
108    /// A list of free slot IDs.
109    free_slots: Vec<MaterialBindGroupSlot>,
110    /// The total number of materials currently allocated in this slab.
111    live_allocation_count: u32,
112    /// The total number of resources currently allocated in the binding arrays.
113    allocated_resource_count: u32,
114}
115
116/// A GPU-accessible buffer that holds the mapping from binding index to
117/// bindless slot.
118///
119/// This is conventionally assigned to bind group binding 0, but it can be
120/// changed by altering the [`Self::binding_number`], which corresponds to the
121/// `#[bindless(index_table(binding(B)))]` attribute in `AsBindGroup`.
122struct MaterialBindlessIndexTable {
123    /// The buffer containing the mappings.
124    buffer: RetainedRawBufferVec<u32>,
125    /// The range of bindless indices that this bindless index table covers.
126    ///
    /// If this range is M..N, then the entry at index *i* of the table maps to
    /// bindless index *i* + M. The size of this table is N - M.
129    ///
130    /// This corresponds to the `#[bindless(index_table(range(M..N)))]`
131    /// attribute in `AsBindGroup`.
132    index_range: Range<BindlessIndex>,
133    /// The binding number that this index table is assigned to in the shader.
134    binding_number: BindingNumber,
135}
136
137/// A single binding array for storing bindless resources and the bookkeeping
138/// necessary to allocate into it.
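///
/// Resources in a binding array are deduplicated: inserting a resource that's
/// already present in the array returns its existing slot and bumps its
/// reference count, and a slot is only vacated once that reference count
/// drops back to zero.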
139struct MaterialBindlessBindingArray<R>
140where
141    R: GetBindingResourceId,
142{
143    /// The number of the binding that we attach this binding array to.
144    binding_number: BindingNumber,
145    /// A mapping from bindless slot index to the resource stored in that slot,
146    /// if any.
147    bindings: Vec<Option<MaterialBindlessBinding<R>>>,
148    /// The type of resource stored in this binding array.
149    resource_type: BindlessResourceType,
150    /// Maps a resource ID to the slot in which it's stored.
151    ///
152    /// This is essentially the inverse mapping of [`Self::bindings`].
153    resource_to_slot: HashMap<BindingResourceId, u32>,
154    /// A list of free slots in [`Self::bindings`] that contain no binding.
155    free_slots: Vec<u32>,
156    /// The number of allocated objects in this binding array.
157    len: u32,
158}
159
160/// A single resource (sampler, texture, or buffer) in a binding array.
161///
162/// Resources hold a reference count, which specifies the number of materials
163/// currently allocated within the slab that refer to this resource. When the
164/// reference count drops to zero, the resource is freed.
165struct MaterialBindlessBinding<R>
166where
167    R: GetBindingResourceId,
168{
169    /// The sampler, texture, or buffer.
170    resource: R,
171    /// The number of materials currently allocated within the containing slab
172    /// that use this resource.
173    ref_count: u32,
174}
175
176/// The allocator that stores bind groups for non-bindless materials.
177pub struct MaterialBindGroupNonBindlessAllocator {
    /// The label to use for the buffers and bind groups that this allocator allocates.
179    label: &'static str,
180    /// A mapping from [`MaterialBindGroupIndex`] to the bind group allocated in
181    /// each slot.
182    bind_groups: Vec<Option<MaterialNonBindlessAllocatedBindGroup>>,
183    /// The bind groups that are dirty and need to be prepared.
184    ///
185    /// To prepare the bind groups, call
186    /// [`MaterialBindGroupAllocator::prepare_bind_groups`].
187    to_prepare: HashSet<MaterialBindGroupIndex>,
188    /// A list of free bind group indices.
189    free_indices: Vec<MaterialBindGroupIndex>,
190}
191
192/// A single bind group that a [`MaterialBindGroupNonBindlessAllocator`] is
193/// currently managing.
194enum MaterialNonBindlessAllocatedBindGroup {
195    /// An unprepared bind group.
196    ///
197    /// The allocator prepares all outstanding unprepared bind groups when
198    /// [`MaterialBindGroupNonBindlessAllocator::prepare_bind_groups`] is
199    /// called.
200    Unprepared {
201        /// The unprepared bind group, including extra data.
202        bind_group: UnpreparedBindGroup,
203        /// The layout of that bind group.
204        layout: BindGroupLayoutDescriptor,
205    },
206    /// A bind group that's already been prepared.
207    Prepared {
208        bind_group: PreparedBindGroup,
209        #[expect(dead_code, reason = "These buffers are only referenced by bind groups")]
210        uniform_buffers: Vec<Buffer>,
211    },
212}
213
214/// Dummy instances of various resources that we fill unused slots in binding
215/// arrays with.
216#[derive(Resource)]
217pub struct FallbackBindlessResources {
218    /// A dummy filtering sampler.
219    filtering_sampler: Sampler,
220    /// A dummy non-filtering sampler.
221    non_filtering_sampler: Sampler,
222    /// A dummy comparison sampler.
223    comparison_sampler: Sampler,
224}
225
226/// The `wgpu` ID of a single bindless or non-bindless resource.
227#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
228enum BindingResourceId {
229    /// A buffer.
230    Buffer(BufferId),
231    /// A texture view, with the given dimension.
232    TextureView(TextureViewDimension, TextureViewId),
233    /// A sampler.
234    Sampler(SamplerId),
235    /// A buffer containing plain old data.
236    ///
237    /// This corresponds to the `#[data]` structure-level attribute on
238    /// `AsBindGroup`.
239    DataBuffer,
240}
241
242/// A temporary list of references to `wgpu` bindless resources.
243///
244/// We need this because the `wgpu` bindless API takes a slice of references.
245/// Thus we need to create intermediate vectors of bindless resources in order
246/// to satisfy `wgpu`'s lifetime requirements.
247enum BindingResourceArray<'a> {
    /// A list of buffer bindings.
249    Buffers(Vec<BufferBinding<'a>>),
250    /// A list of texture views.
251    TextureViews(Vec<&'a WgpuTextureView>),
252    /// A list of samplers.
253    Samplers(Vec<&'a WgpuSampler>),
254}
255
256/// The location of a material (either bindless or non-bindless) within the
257/// slabs.
258#[derive(Clone, Copy, Debug, Default, Reflect)]
259#[reflect(Clone, Default)]
260pub struct MaterialBindingId {
261    /// The index of the bind group (slab) where the GPU data is located.
262    pub group: MaterialBindGroupIndex,
263    /// The slot within that bind group.
264    ///
265    /// Non-bindless materials will always have a slot of 0.
266    pub slot: MaterialBindGroupSlot,
267}
268
269/// The index of each material bind group.
270///
271/// In bindless mode, each bind group contains multiple materials. In
272/// non-bindless mode, each bind group contains only one material.
273#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Hash, Reflect, Deref, DerefMut)]
274#[reflect(Default, Clone, PartialEq, Hash)]
275pub struct MaterialBindGroupIndex(pub u32);
276
277impl From<u32> for MaterialBindGroupIndex {
278    fn from(value: u32) -> Self {
279        MaterialBindGroupIndex(value)
280    }
281}
282
283/// The index of the slot containing material data within each material bind
284/// group.
285///
286/// In bindless mode, this slot is needed to locate the material data in each
287/// bind group, since multiple materials are packed into a single slab. In
288/// non-bindless mode, this slot is always 0.
289#[derive(Clone, Copy, Debug, Default, PartialEq, Reflect, Deref, DerefMut)]
290#[reflect(Default, Clone, PartialEq)]
291pub struct MaterialBindGroupSlot(pub u32);
292
293/// The CPU/GPU synchronization state of a buffer that we maintain.
294///
/// Currently, the buffers that we maintain are the
/// [`MaterialBindlessIndexTable`]s and the [`MaterialDataBuffer`]s.
297enum BufferDirtyState {
298    /// The buffer is currently synchronized between the CPU and GPU.
299    Clean,
    /// The GPU buffer hasn't been created yet, or needs to be recreated at a
    /// larger size.
301    NeedsReserve,
302    /// The buffer exists on both CPU and GPU, but the GPU data is out of date.
303    NeedsUpload,
304}
305
306/// Information that describes a potential allocation of an
307/// [`UnpreparedBindGroup`] into a slab.
308struct BindlessAllocationCandidate {
309    /// A map that, for every resource in the [`UnpreparedBindGroup`] that
    /// already existed in this slab, maps the bindless index of that resource to
311    /// its slot in the appropriate binding array.
312    pre_existing_resources: HashMap<BindlessIndex, u32>,
313    /// Stores the number of free slots that are needed to satisfy this
314    /// allocation.
315    needed_free_slots: u32,
316}
317
318/// A trait that allows fetching the [`BindingResourceId`] from a
319/// [`BindlessResourceType`].
320///
321/// This is used when freeing bindless resources, in order to locate the IDs
322/// assigned to each resource so that they can be removed from the appropriate
323/// maps.
324trait GetBindingResourceId {
325    /// Returns the [`BindingResourceId`] for this resource.
326    ///
327    /// `resource_type` specifies this resource's type. This is used for
328    /// textures, as a `wgpu` [`TextureView`] doesn't store enough information
329    /// itself to determine its dimension.
330    fn binding_resource_id(&self, resource_type: BindlessResourceType) -> BindingResourceId;
331}
332
333/// The public interface to a slab, which represents a single bind group.
334pub struct MaterialSlab<'a>(MaterialSlabImpl<'a>);
335
336/// The actual implementation of a material slab.
337///
338/// This has bindless and non-bindless variants.
339enum MaterialSlabImpl<'a> {
340    /// The implementation of the slab interface we use when the slab
341    /// is bindless.
342    Bindless(&'a MaterialBindlessSlab),
343    /// The implementation of the slab interface we use when the slab
344    /// is non-bindless.
345    NonBindless(MaterialNonBindlessSlab<'a>),
346}
347
348/// A single bind group that the [`MaterialBindGroupNonBindlessAllocator`]
349/// manages.
350enum MaterialNonBindlessSlab<'a> {
351    /// A slab that has a bind group.
352    Prepared(&'a PreparedBindGroup),
353    /// A slab that doesn't yet have a bind group.
354    Unprepared,
355}
356
357/// Manages an array of untyped plain old data on GPU and allocates individual
358/// slots within that array.
359///
360/// This supports the `#[data]` attribute of `AsBindGroup`.
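///
/// As a sketch of the layout, the data for the material in slot *i* occupies
/// bytes `i * aligned_element_size..(i + 1) * aligned_element_size` of
/// [`Self::buffer`].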
361struct MaterialDataBuffer {
362    /// The number of the binding that we attach this storage buffer to.
363    binding_number: BindingNumber,
364    /// The actual data.
365    ///
366    /// Note that this is untyped (`u8`); the actual aligned size of each
    /// element is given by [`Self::aligned_element_size`].
368    buffer: RetainedRawBufferVec<u8>,
369    /// The size of each element in the buffer, including padding and alignment
370    /// if any.
371    aligned_element_size: u32,
372    /// A list of free slots within the buffer.
373    free_slots: Vec<u32>,
374    /// The actual number of slots that have been allocated.
375    len: u32,
376}
377
378/// A buffer containing plain old data, already packed into the appropriate GPU
379/// format, and that can be updated incrementally.
380///
381/// This structure exists in order to encapsulate the lazy update
382/// ([`BufferDirtyState`]) logic in a single place.
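///
/// A typical flow, as a sketch:
///
/// ```ignore
/// let mut table: RetainedRawBufferVec<u32> = RetainedRawBufferVec::new(BufferUsages::STORAGE);
/// table.push(0);                                // CPU-side edit (via `Deref` to `RawBufferVec`)
/// table.dirty = BufferDirtyState::NeedsReserve; // the buffer may need to grow
/// table.prepare(&render_device);                // (re)create the GPU buffer if needed
/// table.write(&render_device, &render_queue);   // upload the contents, leaving it `Clean`
/// ```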
383#[derive(Deref, DerefMut)]
384struct RetainedRawBufferVec<T>
385where
386    T: Pod,
387{
388    /// The contents of the buffer.
389    #[deref]
390    buffer: RawBufferVec<T>,
391    /// Whether the contents of the buffer have been uploaded to the GPU.
392    dirty: BufferDirtyState,
393}
394
395/// The size of the buffer that we assign to unused buffer slots, in bytes.
396///
397/// This is essentially arbitrary, as it doesn't seem to matter to `wgpu` what
398/// the size is.
399const DEFAULT_BINDLESS_FALLBACK_BUFFER_SIZE: u64 = 16;
400
401impl From<u32> for MaterialBindGroupSlot {
402    fn from(value: u32) -> Self {
403        MaterialBindGroupSlot(value)
404    }
405}
406
407impl From<MaterialBindGroupSlot> for u32 {
408    fn from(value: MaterialBindGroupSlot) -> Self {
409        value.0
410    }
411}
412
413impl<'a> From<&'a OwnedBindingResource> for BindingResourceId {
414    fn from(value: &'a OwnedBindingResource) -> Self {
415        match *value {
416            OwnedBindingResource::Buffer(ref buffer) => BindingResourceId::Buffer(buffer.id()),
417            OwnedBindingResource::Data(_) => BindingResourceId::DataBuffer,
418            OwnedBindingResource::TextureView(ref texture_view_dimension, ref texture_view) => {
419                BindingResourceId::TextureView(*texture_view_dimension, texture_view.id())
420            }
421            OwnedBindingResource::Sampler(_, ref sampler) => {
422                BindingResourceId::Sampler(sampler.id())
423            }
424        }
425    }
426}
427
428impl GetBindingResourceId for Buffer {
429    fn binding_resource_id(&self, _: BindlessResourceType) -> BindingResourceId {
430        BindingResourceId::Buffer(self.id())
431    }
432}
433
434impl GetBindingResourceId for Sampler {
435    fn binding_resource_id(&self, _: BindlessResourceType) -> BindingResourceId {
436        BindingResourceId::Sampler(self.id())
437    }
438}
439
440impl GetBindingResourceId for TextureView {
441    fn binding_resource_id(&self, resource_type: BindlessResourceType) -> BindingResourceId {
442        let texture_view_dimension = match resource_type {
443            BindlessResourceType::Texture1d => TextureViewDimension::D1,
444            BindlessResourceType::Texture2d => TextureViewDimension::D2,
445            BindlessResourceType::Texture2dArray => TextureViewDimension::D2Array,
446            BindlessResourceType::Texture3d => TextureViewDimension::D3,
447            BindlessResourceType::TextureCube => TextureViewDimension::Cube,
448            BindlessResourceType::TextureCubeArray => TextureViewDimension::CubeArray,
449            _ => panic!("Resource type is not a texture"),
450        };
451        BindingResourceId::TextureView(texture_view_dimension, self.id())
452    }
453}
454
455impl MaterialBindGroupAllocator {
456    /// Creates a new [`MaterialBindGroupAllocator`] managing the data for a
    /// single material type.
458    pub fn new(
459        render_device: &RenderDevice,
460        label: &'static str,
461        bindless_descriptor: Option<BindlessDescriptor>,
462        bind_group_layout: BindGroupLayoutDescriptor,
463        slab_capacity: Option<BindlessSlabResourceLimit>,
464    ) -> MaterialBindGroupAllocator {
465        if let Some(bindless_descriptor) = bindless_descriptor {
466            MaterialBindGroupAllocator::Bindless(Box::new(MaterialBindGroupBindlessAllocator::new(
467                render_device,
468                label,
469                bindless_descriptor,
470                bind_group_layout,
471                slab_capacity,
472            )))
473        } else {
474            MaterialBindGroupAllocator::NonBindless(Box::new(
475                MaterialBindGroupNonBindlessAllocator::new(label),
476            ))
477        }
478    }
479
480    /// Returns the slab with the given index, if one exists.
481    pub fn get(&self, group: MaterialBindGroupIndex) -> Option<MaterialSlab<'_>> {
482        match *self {
483            MaterialBindGroupAllocator::Bindless(ref bindless_allocator) => bindless_allocator
484                .get(group)
485                .map(|bindless_slab| MaterialSlab(MaterialSlabImpl::Bindless(bindless_slab))),
486            MaterialBindGroupAllocator::NonBindless(ref non_bindless_allocator) => {
487                non_bindless_allocator.get(group).map(|non_bindless_slab| {
488                    MaterialSlab(MaterialSlabImpl::NonBindless(non_bindless_slab))
489                })
490            }
491        }
492    }
493
494    /// Allocates an [`UnpreparedBindGroup`] and returns the resulting binding ID.
495    ///
496    /// This method should generally be preferred over
497    /// [`Self::allocate_prepared`], because this method supports both bindless
498    /// and non-bindless bind groups. Only use [`Self::allocate_prepared`] if
499    /// you need to prepare the bind group yourself.
500    pub fn allocate_unprepared(
501        &mut self,
502        unprepared_bind_group: UnpreparedBindGroup,
503        bind_group_layout: &BindGroupLayoutDescriptor,
504    ) -> MaterialBindingId {
505        match *self {
506            MaterialBindGroupAllocator::Bindless(
507                ref mut material_bind_group_bindless_allocator,
508            ) => material_bind_group_bindless_allocator.allocate_unprepared(unprepared_bind_group),
509            MaterialBindGroupAllocator::NonBindless(
510                ref mut material_bind_group_non_bindless_allocator,
511            ) => material_bind_group_non_bindless_allocator
512                .allocate_unprepared(unprepared_bind_group, (*bind_group_layout).clone()),
513        }
514    }
515
516    /// Places a pre-prepared bind group into a slab.
517    ///
518    /// For bindless materials, the allocator internally manages the bind
519    /// groups, so calling this method will panic if this is a bindless
520    /// allocator. Only non-bindless allocators support this method.
521    ///
522    /// It's generally preferred to use [`Self::allocate_unprepared`], because
523    /// that method supports both bindless and non-bindless allocators. Only use
524    /// this method if you need to prepare the bind group yourself.
525    pub fn allocate_prepared(
526        &mut self,
527        prepared_bind_group: PreparedBindGroup,
528    ) -> MaterialBindingId {
529        match *self {
530            MaterialBindGroupAllocator::Bindless(_) => {
531                panic!(
532                    "Bindless resources are incompatible with implementing `as_bind_group` \
533                     directly; implement `unprepared_bind_group` instead or disable bindless"
534                )
535            }
536            MaterialBindGroupAllocator::NonBindless(ref mut non_bindless_allocator) => {
537                non_bindless_allocator.allocate_prepared(prepared_bind_group)
538            }
539        }
540    }
541
542    /// Deallocates the material with the given binding ID.
543    ///
544    /// Any resources that are no longer referenced are removed from the slab.
545    pub fn free(&mut self, material_binding_id: MaterialBindingId) {
546        match *self {
547            MaterialBindGroupAllocator::Bindless(
548                ref mut material_bind_group_bindless_allocator,
549            ) => material_bind_group_bindless_allocator.free(material_binding_id),
550            MaterialBindGroupAllocator::NonBindless(
551                ref mut material_bind_group_non_bindless_allocator,
552            ) => material_bind_group_non_bindless_allocator.free(material_binding_id),
553        }
554    }
555
556    /// Recreates any bind groups corresponding to slabs that have been modified
557    /// since last calling [`MaterialBindGroupAllocator::prepare_bind_groups`].
558    pub fn prepare_bind_groups(
559        &mut self,
560        render_device: &RenderDevice,
561        pipeline_cache: &PipelineCache,
562        fallback_bindless_resources: &FallbackBindlessResources,
563        fallback_image: &FallbackImage,
564    ) {
565        match *self {
566            MaterialBindGroupAllocator::Bindless(
567                ref mut material_bind_group_bindless_allocator,
568            ) => material_bind_group_bindless_allocator.prepare_bind_groups(
569                render_device,
570                pipeline_cache,
571                fallback_bindless_resources,
572                fallback_image,
573            ),
574            MaterialBindGroupAllocator::NonBindless(
575                ref mut material_bind_group_non_bindless_allocator,
576            ) => material_bind_group_non_bindless_allocator
577                .prepare_bind_groups(render_device, pipeline_cache),
578        }
579    }
580
581    /// Uploads the contents of all buffers that this
582    /// [`MaterialBindGroupAllocator`] manages to the GPU.
583    ///
584    /// Non-bindless allocators don't currently manage any buffers, so this
585    /// method only has an effect for bindless allocators.
586    pub fn write_buffers(&mut self, render_device: &RenderDevice, render_queue: &RenderQueue) {
587        match *self {
588            MaterialBindGroupAllocator::Bindless(
589                ref mut material_bind_group_bindless_allocator,
590            ) => material_bind_group_bindless_allocator.write_buffers(render_device, render_queue),
591            MaterialBindGroupAllocator::NonBindless(_) => {
592                // Not applicable.
593            }
594        }
595    }
596
    /// Returns the number of allocated slabs for a bindless allocator, or 0 if
    /// this is [`Self::NonBindless`].
599    pub fn slab_count(&self) -> usize {
600        match self {
601            Self::Bindless(bless) => bless.slabs.len(),
602            Self::NonBindless(_) => 0,
603        }
604    }
605
    /// Returns the total size in bytes of the data buffers allocated across all
    /// slabs of a bindless allocator, or 0 if this is [`Self::NonBindless`].
608    pub fn slabs_size(&self) -> usize {
609        match self {
610            Self::Bindless(bless) => bless
611                .slabs
612                .iter()
613                .flat_map(|slab| {
614                    slab.data_buffers
615                        .iter()
616                        .map(|(_, buffer)| buffer.buffer.len())
617                })
618                .sum(),
619            Self::NonBindless(_) => 0,
620        }
621    }
622
    /// Returns the total number of resources allocated across all slabs of a
    /// bindless allocator, or 0 if this is [`Self::NonBindless`].
625    pub fn allocations(&self) -> u64 {
626        match self {
627            Self::Bindless(bless) => bless
628                .slabs
629                .iter()
630                .map(|slab| u64::from(slab.allocated_resource_count))
631                .sum(),
632            Self::NonBindless(_) => 0,
633        }
634    }
635}
636
637impl MaterialBindlessIndexTable {
638    /// Creates a new [`MaterialBindlessIndexTable`] for a single slab.
639    fn new(
640        bindless_index_table_descriptor: &BindlessIndexTableDescriptor,
641    ) -> MaterialBindlessIndexTable {
642        // Preallocate space for one bindings table, so that there will always be a buffer.
643        let mut buffer = RetainedRawBufferVec::new(BufferUsages::STORAGE);
644        for _ in *bindless_index_table_descriptor.indices.start
645            ..*bindless_index_table_descriptor.indices.end
646        {
647            buffer.push(0);
648        }
649
650        MaterialBindlessIndexTable {
651            buffer,
652            index_range: bindless_index_table_descriptor.indices.clone(),
653            binding_number: bindless_index_table_descriptor.binding_number,
654        }
655    }
656
657    /// Returns the bindings in the binding index table.
658    ///
659    /// If the current [`MaterialBindlessIndexTable::index_range`] is M..N, then
660    /// element *i* of the returned binding index table contains the slot of the
661    /// bindless resource with bindless index *i* + M.
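    ///
    /// For example, if the index range is `4..7` and `slot` is 2, this returns
    /// elements `6..9` of the table, and element `6` holds the binding array
    /// slot of the resource with bindless index 4.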
662    fn get(&self, slot: MaterialBindGroupSlot) -> &[u32] {
663        let struct_size = *self.index_range.end as usize - *self.index_range.start as usize;
664        let start = struct_size * slot.0 as usize;
665        &self.buffer.values()[start..(start + struct_size)]
666    }
667
668    /// Returns a single binding from the binding index table.
669    fn get_binding(
670        &self,
671        slot: MaterialBindGroupSlot,
672        bindless_index: BindlessIndex,
673    ) -> Option<u32> {
674        if bindless_index < self.index_range.start || bindless_index >= self.index_range.end {
675            return None;
676        }
677        self.get(slot)
678            .get((*bindless_index - *self.index_range.start) as usize)
679            .copied()
680    }
681
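    /// Returns the number of table entries per material slot.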
682    fn table_length(&self) -> u32 {
683        self.index_range.end.0 - self.index_range.start.0
684    }
685
686    /// Updates the binding index table for a single material.
687    ///
688    /// The `allocated_resource_slots` map contains a mapping from the
689    /// [`BindlessIndex`] of each resource that the material references to the
690    /// slot that that resource occupies in the appropriate binding array. This
691    /// method serializes that map into a binding index table that the shader
692    /// can read.
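    ///
    /// For example (sketch): with an index range of `4..7`, a map entry taking
    /// bindless index 5 to binding array slot 7 writes a 7 into entry 1 of the
    /// row for the given material slot.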
693    fn set(
694        &mut self,
695        slot: MaterialBindGroupSlot,
696        allocated_resource_slots: &HashMap<BindlessIndex, u32>,
697    ) {
698        let table_len = self.table_length() as usize;
699        let range = (slot.0 as usize * table_len)..((slot.0 as usize + 1) * table_len);
700        while self.buffer.len() < range.end {
701            self.buffer.push(0);
702        }
703
704        for (&bindless_index, &resource_slot) in allocated_resource_slots {
705            if self.index_range.contains(&bindless_index) {
706                self.buffer.set(
707                    *bindless_index + range.start as u32 - *self.index_range.start,
708                    resource_slot,
709                );
710            }
711        }
712
713        // Mark the buffer as needing to be recreated, in case we grew it.
714        self.buffer.dirty = BufferDirtyState::NeedsReserve;
715    }
716
717    /// Returns the [`BindGroupEntry`] for the index table itself.
718    fn bind_group_entry(&self) -> BindGroupEntry<'_> {
719        BindGroupEntry {
720            binding: *self.binding_number,
721            resource: self
722                .buffer
723                .buffer()
724                .expect("Bindings buffer must exist")
725                .as_entire_binding(),
726        }
727    }
728}
729
730impl<T> RetainedRawBufferVec<T>
731where
732    T: Pod,
733{
734    /// Creates a new empty [`RetainedRawBufferVec`] supporting the given
735    /// [`BufferUsages`].
736    fn new(buffer_usages: BufferUsages) -> RetainedRawBufferVec<T> {
737        RetainedRawBufferVec {
738            buffer: RawBufferVec::new(buffer_usages),
739            dirty: BufferDirtyState::NeedsUpload,
740        }
741    }
742
743    /// Recreates the GPU backing buffer if needed.
744    fn prepare(&mut self, render_device: &RenderDevice) {
745        match self.dirty {
746            BufferDirtyState::Clean | BufferDirtyState::NeedsUpload => {}
747            BufferDirtyState::NeedsReserve => {
748                let capacity = self.buffer.len();
749                self.buffer.reserve(capacity, render_device);
750                self.dirty = BufferDirtyState::NeedsUpload;
751            }
752        }
753    }
754
755    /// Writes the current contents of the buffer to the GPU if necessary.
756    fn write(&mut self, render_device: &RenderDevice, render_queue: &RenderQueue) {
757        match self.dirty {
758            BufferDirtyState::Clean => {}
759            BufferDirtyState::NeedsReserve | BufferDirtyState::NeedsUpload => {
760                self.buffer.write_buffer(render_device, render_queue);
761                self.dirty = BufferDirtyState::Clean;
762            }
763        }
764    }
765}
766
767impl MaterialBindGroupBindlessAllocator {
768    /// Creates a new [`MaterialBindGroupBindlessAllocator`] managing the data
    /// for a single bindless material type.
770    fn new(
771        render_device: &RenderDevice,
772        label: &'static str,
773        bindless_descriptor: BindlessDescriptor,
774        bind_group_layout: BindGroupLayoutDescriptor,
775        slab_capacity: Option<BindlessSlabResourceLimit>,
776    ) -> MaterialBindGroupBindlessAllocator {
777        let fallback_buffers = bindless_descriptor
778            .buffers
779            .iter()
780            .map(|bindless_buffer_descriptor| {
781                (
782                    bindless_buffer_descriptor.bindless_index,
783                    render_device.create_buffer(&BufferDescriptor {
784                        label: Some("bindless fallback buffer"),
785                        size: match bindless_buffer_descriptor.size {
786                            Some(size) => size as u64,
787                            None => DEFAULT_BINDLESS_FALLBACK_BUFFER_SIZE,
788                        },
789                        usage: BufferUsages::STORAGE,
790                        mapped_at_creation: false,
791                    }),
792                )
793            })
794            .collect();
795
796        MaterialBindGroupBindlessAllocator {
797            label,
798            slabs: vec![],
799            bind_group_layout,
800            bindless_descriptor,
801            fallback_buffers,
802            slab_capacity: slab_capacity
803                .expect("Non-bindless materials should use the non-bindless allocator")
804                .resolve(),
805        }
806    }
807
808    /// Allocates the resources for a single material into a slab and returns
809    /// the resulting ID.
810    ///
811    /// The returned [`MaterialBindingId`] can later be used to fetch the slab
812    /// that was used.
813    ///
814    /// This function can't fail. If all slabs are full, then a new slab is
815    /// created, and the material is allocated into it.
816    fn allocate_unprepared(
817        &mut self,
818        mut unprepared_bind_group: UnpreparedBindGroup,
819    ) -> MaterialBindingId {
820        for (slab_index, slab) in self.slabs.iter_mut().enumerate() {
821            trace!("Trying to allocate in slab {}", slab_index);
822            match slab.try_allocate(unprepared_bind_group, self.slab_capacity) {
823                Ok(slot) => {
824                    return MaterialBindingId {
825                        group: MaterialBindGroupIndex(slab_index as u32),
826                        slot,
827                    };
828                }
829                Err(bind_group) => unprepared_bind_group = bind_group,
830            }
831        }
832
833        let group = MaterialBindGroupIndex(self.slabs.len() as u32);
834        self.slabs
835            .push(MaterialBindlessSlab::new(&self.bindless_descriptor));
836
837        // Allocate into the newly-pushed slab.
838        let Ok(slot) = self
839            .slabs
840            .last_mut()
841            .expect("We just pushed a slab")
842            .try_allocate(unprepared_bind_group, self.slab_capacity)
843        else {
844            panic!("An allocation into an empty slab should always succeed")
845        };
846
847        MaterialBindingId { group, slot }
848    }
849
850    /// Deallocates the material with the given binding ID.
851    ///
852    /// Any resources that are no longer referenced are removed from the slab.
853    fn free(&mut self, material_binding_id: MaterialBindingId) {
854        self.slabs
855            .get_mut(material_binding_id.group.0 as usize)
856            .expect("Slab should exist")
857            .free(material_binding_id.slot, &self.bindless_descriptor);
858    }
859
860    /// Returns the slab with the given bind group index.
861    ///
862    /// A [`MaterialBindGroupIndex`] can be fetched from a
863    /// [`MaterialBindingId`].
864    fn get(&self, group: MaterialBindGroupIndex) -> Option<&MaterialBindlessSlab> {
865        self.slabs.get(group.0 as usize)
866    }
867
868    /// Recreates any bind groups corresponding to slabs that have been modified
869    /// since last calling
870    /// [`MaterialBindGroupBindlessAllocator::prepare_bind_groups`].
871    fn prepare_bind_groups(
872        &mut self,
873        render_device: &RenderDevice,
874        pipeline_cache: &PipelineCache,
875        fallback_bindless_resources: &FallbackBindlessResources,
876        fallback_image: &FallbackImage,
877    ) {
878        for slab in &mut self.slabs {
879            slab.prepare(
880                render_device,
881                pipeline_cache,
882                self.label,
883                &self.bind_group_layout,
884                fallback_bindless_resources,
885                &self.fallback_buffers,
886                fallback_image,
887                &self.bindless_descriptor,
888                self.slab_capacity,
889            );
890        }
891    }
892
893    /// Writes any buffers that we're managing to the GPU.
894    ///
895    /// Currently, this only consists of the bindless index tables.
896    fn write_buffers(&mut self, render_device: &RenderDevice, render_queue: &RenderQueue) {
897        for slab in &mut self.slabs {
898            slab.write_buffer(render_device, render_queue);
899        }
900    }
901}
902
903impl MaterialBindlessSlab {
904    /// Attempts to allocate the given unprepared bind group in this slab.
905    ///
906    /// If the allocation succeeds, this method returns the slot that the
907    /// allocation was placed in. If the allocation fails because the slab was
908    /// full, this method returns the unprepared bind group back to the caller
909    /// so that it can try to allocate again.
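    ///
    /// For example (sketch): with a `slot_capacity` of 16, a slab that already
    /// holds 14 resources can't accept a material needing 3 new resource slots,
    /// so the bind group is handed back and the caller tries the next slab.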
910    fn try_allocate(
911        &mut self,
912        unprepared_bind_group: UnpreparedBindGroup,
913        slot_capacity: u32,
914    ) -> Result<MaterialBindGroupSlot, UnpreparedBindGroup> {
915        // Locate pre-existing resources, and determine how many free slots we need.
916        let Some(allocation_candidate) = self.check_allocation(&unprepared_bind_group) else {
917            return Err(unprepared_bind_group);
918        };
919
920        // Check to see if we have enough free space.
921        //
922        // As a special case, note that if *nothing* is allocated in this slab,
923        // then we always allow a material to be placed in it, regardless of the
924        // number of bindings the material has. This is so that, if the
925        // platform's maximum bindless count is set too low to hold even a
926        // single material, we can still place each material into a separate
927        // slab instead of failing outright.
928        if self.allocated_resource_count > 0
929            && self.allocated_resource_count + allocation_candidate.needed_free_slots
930                > slot_capacity
931        {
932            trace!("Slab is full, can't allocate");
933            return Err(unprepared_bind_group);
934        }
935
936        // OK, we can allocate in this slab. Assign a slot ID.
937        let slot = self
938            .free_slots
939            .pop()
940            .unwrap_or(MaterialBindGroupSlot(self.live_allocation_count));
941
942        // Bump the live allocation count.
943        self.live_allocation_count += 1;
944
945        // Insert the resources into the binding arrays.
946        let allocated_resource_slots =
947            self.insert_resources(unprepared_bind_group.bindings, allocation_candidate);
948
949        // Serialize the allocated resource slots.
950        for bindless_index_table in &mut self.bindless_index_tables {
951            bindless_index_table.set(slot, &allocated_resource_slots);
952        }
953
954        // Invalidate the cached bind group.
955        self.bind_group = None;
956
957        Ok(slot)
958    }
959
960    /// Gathers the information needed to determine whether the given unprepared
961    /// bind group can be allocated in this slab.
962    fn check_allocation(
963        &self,
964        unprepared_bind_group: &UnpreparedBindGroup,
965    ) -> Option<BindlessAllocationCandidate> {
966        let mut allocation_candidate = BindlessAllocationCandidate {
967            pre_existing_resources: HashMap::default(),
968            needed_free_slots: 0,
969        };
970
971        for &(bindless_index, ref owned_binding_resource) in unprepared_bind_group.bindings.iter() {
972            let bindless_index = BindlessIndex(bindless_index);
973            match *owned_binding_resource {
974                OwnedBindingResource::Buffer(ref buffer) => {
975                    let Some(binding_array) = self.buffers.get(&bindless_index) else {
976                        error!(
977                            "Binding array wasn't present for buffer at index {:?}",
978                            bindless_index
979                        );
980                        return None;
981                    };
982                    match binding_array.find(BindingResourceId::Buffer(buffer.id())) {
983                        Some(slot) => {
984                            allocation_candidate
985                                .pre_existing_resources
986                                .insert(bindless_index, slot);
987                        }
988                        None => allocation_candidate.needed_free_slots += 1,
989                    }
990                }
991
992                OwnedBindingResource::Data(_) => {
                    // Data buffers grow as needed, so they never consume a
                    // free binding array slot.
994                }
995
996                OwnedBindingResource::TextureView(texture_view_dimension, ref texture_view) => {
997                    let bindless_resource_type = BindlessResourceType::from(texture_view_dimension);
998                    match self
999                        .textures
1000                        .get(&bindless_resource_type)
1001                        .expect("Missing binding array for texture")
1002                        .find(BindingResourceId::TextureView(
1003                            texture_view_dimension,
1004                            texture_view.id(),
1005                        )) {
1006                        Some(slot) => {
1007                            allocation_candidate
1008                                .pre_existing_resources
1009                                .insert(bindless_index, slot);
1010                        }
1011                        None => {
1012                            allocation_candidate.needed_free_slots += 1;
1013                        }
1014                    }
1015                }
1016
1017                OwnedBindingResource::Sampler(sampler_binding_type, ref sampler) => {
1018                    let bindless_resource_type = BindlessResourceType::from(sampler_binding_type);
1019                    match self
1020                        .samplers
1021                        .get(&bindless_resource_type)
1022                        .expect("Missing binding array for sampler")
1023                        .find(BindingResourceId::Sampler(sampler.id()))
1024                    {
1025                        Some(slot) => {
1026                            allocation_candidate
1027                                .pre_existing_resources
1028                                .insert(bindless_index, slot);
1029                        }
1030                        None => {
1031                            allocation_candidate.needed_free_slots += 1;
1032                        }
1033                    }
1034                }
1035            }
1036        }
1037
1038        Some(allocation_candidate)
1039    }
1040
1041    /// Inserts the given [`BindingResources`] into this slab.
1042    ///
1043    /// Returns a table that maps the bindless index of each resource to its
1044    /// slot in its binding array.
1045    fn insert_resources(
1046        &mut self,
1047        mut binding_resources: BindingResources,
1048        allocation_candidate: BindlessAllocationCandidate,
1049    ) -> HashMap<BindlessIndex, u32> {
1050        let mut allocated_resource_slots = HashMap::default();
1051
1052        for (bindless_index, owned_binding_resource) in binding_resources.drain(..) {
1053            let bindless_index = BindlessIndex(bindless_index);
1054
1055            let pre_existing_slot = allocation_candidate
1056                .pre_existing_resources
1057                .get(&bindless_index);
1058
            // Insert the resource into its binding array, reusing any pre-existing slot.
1060            let binding_resource_id = BindingResourceId::from(&owned_binding_resource);
1061            let increment_allocated_resource_count = match owned_binding_resource {
1062                OwnedBindingResource::Buffer(buffer) => {
1063                    let slot = self
1064                        .buffers
1065                        .get_mut(&bindless_index)
1066                        .expect("Buffer binding array should exist")
1067                        .insert(binding_resource_id, buffer);
1068                    allocated_resource_slots.insert(bindless_index, slot);
1069
1070                    if let Some(pre_existing_slot) = pre_existing_slot {
1071                        assert_eq!(*pre_existing_slot, slot);
1072
1073                        false
1074                    } else {
1075                        true
1076                    }
1077                }
1078                OwnedBindingResource::Data(data) => {
1079                    if pre_existing_slot.is_some() {
1080                        panic!("Data buffers can't be deduplicated")
1081                    }
1082
1083                    let slot = self
1084                        .data_buffers
1085                        .get_mut(&bindless_index)
1086                        .expect("Data buffer binding array should exist")
1087                        .insert(&data);
1088                    allocated_resource_slots.insert(bindless_index, slot);
1089                    false
1090                }
1091                OwnedBindingResource::TextureView(texture_view_dimension, texture_view) => {
1092                    let bindless_resource_type = BindlessResourceType::from(texture_view_dimension);
1093                    let slot = self
1094                        .textures
1095                        .get_mut(&bindless_resource_type)
1096                        .expect("Texture array should exist")
1097                        .insert(binding_resource_id, texture_view);
1098                    allocated_resource_slots.insert(bindless_index, slot);
1099
1100                    if let Some(pre_existing_slot) = pre_existing_slot {
1101                        assert_eq!(*pre_existing_slot, slot);
1102
1103                        false
1104                    } else {
1105                        true
1106                    }
1107                }
1108                OwnedBindingResource::Sampler(sampler_binding_type, sampler) => {
1109                    let bindless_resource_type = BindlessResourceType::from(sampler_binding_type);
1110                    let slot = self
1111                        .samplers
1112                        .get_mut(&bindless_resource_type)
1113                        .expect("Sampler should exist")
1114                        .insert(binding_resource_id, sampler);
1115                    allocated_resource_slots.insert(bindless_index, slot);
1116
1117                    if let Some(pre_existing_slot) = pre_existing_slot {
1118                        assert_eq!(*pre_existing_slot, slot);
1119
1120                        false
1121                    } else {
1122                        true
1123                    }
1124                }
1125            };
1126
1127            // Bump the allocated resource count.
1128            if increment_allocated_resource_count {
1129                self.allocated_resource_count += 1;
1130            }
1131        }
1132
1133        allocated_resource_slots
1134    }
1135
1136    /// Removes the material allocated in the given slot, with the given
1137    /// descriptor, from this slab.
1138    fn free(&mut self, slot: MaterialBindGroupSlot, bindless_descriptor: &BindlessDescriptor) {
1139        // Loop through each binding.
1140        for (bindless_index, bindless_resource_type) in
1141            bindless_descriptor.resources.iter().enumerate()
1142        {
1143            let bindless_index = BindlessIndex::from(bindless_index as u32);
1144            let Some(bindless_index_table) = self.get_bindless_index_table(bindless_index) else {
1145                continue;
1146            };
1147            let Some(bindless_binding) = bindless_index_table.get_binding(slot, bindless_index)
1148            else {
1149                continue;
1150            };
1151
1152            // Free the binding. If the resource in question was anything other
1153            // than a data buffer, then it has a reference count and
1154            // consequently we need to decrement it.
1155            let decrement_allocated_resource_count = match *bindless_resource_type {
1156                BindlessResourceType::None => false,
1157                BindlessResourceType::Buffer => self
1158                    .buffers
1159                    .get_mut(&bindless_index)
1160                    .expect("Buffer should exist with that bindless index")
1161                    .remove(bindless_binding),
1162                BindlessResourceType::DataBuffer => {
1163                    self.data_buffers
1164                        .get_mut(&bindless_index)
1165                        .expect("Data buffer should exist with that bindless index")
1166                        .remove(bindless_binding);
1167                    false
1168                }
1169                BindlessResourceType::SamplerFiltering
1170                | BindlessResourceType::SamplerNonFiltering
1171                | BindlessResourceType::SamplerComparison => self
1172                    .samplers
1173                    .get_mut(bindless_resource_type)
1174                    .expect("Sampler array should exist")
1175                    .remove(bindless_binding),
1176                BindlessResourceType::Texture1d
1177                | BindlessResourceType::Texture2d
1178                | BindlessResourceType::Texture2dArray
1179                | BindlessResourceType::Texture3d
1180                | BindlessResourceType::TextureCube
1181                | BindlessResourceType::TextureCubeArray => self
1182                    .textures
1183                    .get_mut(bindless_resource_type)
1184                    .expect("Texture array should exist")
1185                    .remove(bindless_binding),
1186            };
1187
1188            // If the slot is now free, decrement the allocated resource
1189            // count.
1190            if decrement_allocated_resource_count {
1191                self.allocated_resource_count -= 1;
1192            }
1193        }
1194
1195        // Invalidate the cached bind group.
1196        self.bind_group = None;
1197
1198        // Release the slot ID.
1199        self.free_slots.push(slot);
1200        self.live_allocation_count -= 1;
1201    }
1202
1203    /// Recreates the bind group and bindless index table buffer if necessary.
1204    fn prepare(
1205        &mut self,
1206        render_device: &RenderDevice,
1207        pipeline_cache: &PipelineCache,
1208        label: &'static str,
1209        bind_group_layout: &BindGroupLayoutDescriptor,
1210        fallback_bindless_resources: &FallbackBindlessResources,
1211        fallback_buffers: &HashMap<BindlessIndex, Buffer>,
1212        fallback_image: &FallbackImage,
1213        bindless_descriptor: &BindlessDescriptor,
1214        slab_capacity: u32,
1215    ) {
1216        // Create the bindless index table buffers if needed.
1217        for bindless_index_table in &mut self.bindless_index_tables {
1218            bindless_index_table.buffer.prepare(render_device);
1219        }
1220
        // Create any data buffers we're managing, if necessary.
1222        for data_buffer in self.data_buffers.values_mut() {
1223            data_buffer.buffer.prepare(render_device);
1224        }
1225
1226        // Create the bind group if needed.
1227        self.prepare_bind_group(
1228            render_device,
1229            pipeline_cache,
1230            label,
1231            bind_group_layout,
1232            fallback_bindless_resources,
1233            fallback_buffers,
1234            fallback_image,
1235            bindless_descriptor,
1236            slab_capacity,
1237        );
1238    }
1239
1240    /// Recreates the bind group if this slab has been changed since the last
1241    /// time we created it.
1242    fn prepare_bind_group(
1243        &mut self,
1244        render_device: &RenderDevice,
1245        pipeline_cache: &PipelineCache,
1246        label: &'static str,
1247        bind_group_layout: &BindGroupLayoutDescriptor,
1248        fallback_bindless_resources: &FallbackBindlessResources,
1249        fallback_buffers: &HashMap<BindlessIndex, Buffer>,
1250        fallback_image: &FallbackImage,
1251        bindless_descriptor: &BindlessDescriptor,
1252        slab_capacity: u32,
1253    ) {
1254        // If the bind group is clean, then do nothing.
1255        if self.bind_group.is_some() {
1256            return;
1257        }
1258
1259        // Determine whether we need to pad out our binding arrays with dummy
1260        // resources.
1261        let required_binding_array_size = if render_device
1262            .features()
1263            .contains(WgpuFeatures::PARTIALLY_BOUND_BINDING_ARRAY)
1264        {
1265            None
1266        } else {
1267            Some(slab_capacity)
1268        };
1269
1270        let binding_resource_arrays = self.create_binding_resource_arrays(
1271            fallback_bindless_resources,
1272            fallback_buffers,
1273            fallback_image,
1274            bindless_descriptor,
1275            required_binding_array_size,
1276        );
1277
1278        let mut bind_group_entries: Vec<_> = self
1279            .bindless_index_tables
1280            .iter()
1281            .map(|bindless_index_table| bindless_index_table.bind_group_entry())
1282            .collect();
1283
1284        for &(&binding, ref binding_resource_array) in binding_resource_arrays.iter() {
1285            bind_group_entries.push(BindGroupEntry {
1286                binding,
1287                resource: match *binding_resource_array {
1288                    BindingResourceArray::Buffers(ref buffer_bindings) => {
1289                        BindingResource::BufferArray(&buffer_bindings[..])
1290                    }
1291                    BindingResourceArray::TextureViews(ref texture_views) => {
1292                        BindingResource::TextureViewArray(&texture_views[..])
1293                    }
1294                    BindingResourceArray::Samplers(ref samplers) => {
1295                        BindingResource::SamplerArray(&samplers[..])
1296                    }
1297                },
1298            });
1299        }
1300
1301        // Create bind group entries for any data buffers we're managing.
1302        for data_buffer in self.data_buffers.values() {
1303            bind_group_entries.push(BindGroupEntry {
1304                binding: *data_buffer.binding_number,
1305                resource: data_buffer
1306                    .buffer
1307                    .buffer()
1308                    .expect("Backing data buffer must have been uploaded by now")
1309                    .as_entire_binding(),
1310            });
1311        }
1312
1313        self.bind_group = Some(render_device.create_bind_group(
1314            Some(label),
1315            &pipeline_cache.get_bind_group_layout(bind_group_layout),
1316            &bind_group_entries,
1317        ));
1318    }
1319
1320    /// Writes any buffers that we're managing to the GPU.
1321    ///
1322    /// Currently, this consists of the bindless index table plus any data
1323    /// buffers we're managing.
1324    fn write_buffer(&mut self, render_device: &RenderDevice, render_queue: &RenderQueue) {
1325        for bindless_index_table in &mut self.bindless_index_tables {
1326            bindless_index_table
1327                .buffer
1328                .write(render_device, render_queue);
1329        }
1330
1331        for data_buffer in self.data_buffers.values_mut() {
1332            data_buffer.buffer.write(render_device, render_queue);
1333        }
1334    }
1335
1336    /// Converts our binding arrays into binding resource arrays suitable for
1337    /// passing to `wgpu`.
1338    fn create_binding_resource_arrays<'a>(
1339        &'a self,
1340        fallback_bindless_resources: &'a FallbackBindlessResources,
1341        fallback_buffers: &'a HashMap<BindlessIndex, Buffer>,
1342        fallback_image: &'a FallbackImage,
1343        bindless_descriptor: &'a BindlessDescriptor,
1344        required_binding_array_size: Option<u32>,
1345    ) -> Vec<(&'a u32, BindingResourceArray<'a>)> {
1346        let mut binding_resource_arrays = vec![];
1347
1348        // Build sampler bindings.
1349        self.create_sampler_binding_resource_arrays(
1350            &mut binding_resource_arrays,
1351            fallback_bindless_resources,
1352            required_binding_array_size,
1353        );
1354
1355        // Build texture bindings.
1356        self.create_texture_binding_resource_arrays(
1357            &mut binding_resource_arrays,
1358            fallback_image,
1359            required_binding_array_size,
1360        );
1361
1362        // Build buffer bindings.
1363        self.create_buffer_binding_resource_arrays(
1364            &mut binding_resource_arrays,
1365            fallback_buffers,
1366            bindless_descriptor,
1367            required_binding_array_size,
1368        );
1369
1370        binding_resource_arrays
1371    }
1372
1373    /// Accumulates sampler binding arrays into binding resource arrays suitable
1374    /// for passing to `wgpu`.
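    ///
    /// A minimal sketch of the padding rule used below, with hypothetical
    /// numbers rather than values taken from a real material: when
    /// `PARTIALLY_BOUND_BINDING_ARRAY` is unavailable, each array is padded
    /// with the fallback sampler up to the required binding array size.
    ///
    /// ```
    /// let required_binding_array_size = 4usize;
    /// let mut sampler_bindings = vec!["sampler 0", "sampler 1"]; // two live samplers
    /// sampler_bindings.extend(core::iter::repeat_n(
    ///     "fallback",
    ///     required_binding_array_size - sampler_bindings.len(),
    /// ));
    /// assert_eq!(
    ///     sampler_bindings,
    ///     ["sampler 0", "sampler 1", "fallback", "fallback"]
    /// );
    /// ```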
1375    fn create_sampler_binding_resource_arrays<'a, 'b>(
1376        &'a self,
1377        binding_resource_arrays: &'b mut Vec<(&'a u32, BindingResourceArray<'a>)>,
1378        fallback_bindless_resources: &'a FallbackBindlessResources,
1379        required_binding_array_size: Option<u32>,
1380    ) {
1381        // We have one binding resource array per sampler type.
1382        for (bindless_resource_type, fallback_sampler) in [
1383            (
1384                BindlessResourceType::SamplerFiltering,
1385                &fallback_bindless_resources.filtering_sampler,
1386            ),
1387            (
1388                BindlessResourceType::SamplerNonFiltering,
1389                &fallback_bindless_resources.non_filtering_sampler,
1390            ),
1391            (
1392                BindlessResourceType::SamplerComparison,
1393                &fallback_bindless_resources.comparison_sampler,
1394            ),
1395        ] {
1396            let mut sampler_bindings = vec![];
1397
1398            match self.samplers.get(&bindless_resource_type) {
1399                Some(sampler_bindless_binding_array) => {
1400                    for maybe_bindless_binding in sampler_bindless_binding_array.bindings.iter() {
1401                        match *maybe_bindless_binding {
1402                            Some(ref bindless_binding) => {
1403                                sampler_bindings.push(&*bindless_binding.resource);
1404                            }
1405                            None => sampler_bindings.push(&**fallback_sampler),
1406                        }
1407                    }
1408                }
1409
1410                None => {
1411                    // Fill with a single fallback sampler.
1412                    sampler_bindings.push(&**fallback_sampler);
1413                }
1414            }
1415
1416            if let Some(required_binding_array_size) = required_binding_array_size {
1417                sampler_bindings.extend(iter::repeat_n(
1418                    &**fallback_sampler,
1419                    required_binding_array_size as usize - sampler_bindings.len(),
1420                ));
1421            }
1422
1423            let binding_number = bindless_resource_type
1424                .binding_number()
1425                .expect("Sampler bindless resource type must have a binding number");
1426
1427            binding_resource_arrays.push((
1428                &**binding_number,
1429                BindingResourceArray::Samplers(sampler_bindings),
1430            ));
1431        }
1432    }
1433
1434    /// Accumulates texture binding arrays into binding resource arrays suitable
1435    /// for passing to `wgpu`.
1436    fn create_texture_binding_resource_arrays<'a, 'b>(
1437        &'a self,
1438        binding_resource_arrays: &'b mut Vec<(&'a u32, BindingResourceArray<'a>)>,
1439        fallback_image: &'a FallbackImage,
1440        required_binding_array_size: Option<u32>,
1441    ) {
1442        for (bindless_resource_type, fallback_image) in [
1443            (BindlessResourceType::Texture1d, &fallback_image.d1),
1444            (BindlessResourceType::Texture2d, &fallback_image.d2),
1445            (
1446                BindlessResourceType::Texture2dArray,
1447                &fallback_image.d2_array,
1448            ),
1449            (BindlessResourceType::Texture3d, &fallback_image.d3),
1450            (BindlessResourceType::TextureCube, &fallback_image.cube),
1451            (
1452                BindlessResourceType::TextureCubeArray,
1453                &fallback_image.cube_array,
1454            ),
1455        ] {
1456            let mut texture_bindings = vec![];
1457
1458            let binding_number = bindless_resource_type
1459                .binding_number()
1460                .expect("Texture bindless resource type must have a binding number");
1461
1462            match self.textures.get(&bindless_resource_type) {
1463                Some(texture_bindless_binding_array) => {
1464                    for maybe_bindless_binding in texture_bindless_binding_array.bindings.iter() {
1465                        match *maybe_bindless_binding {
1466                            Some(ref bindless_binding) => {
1467                                texture_bindings.push(&*bindless_binding.resource);
1468                            }
1469                            None => texture_bindings.push(&*fallback_image.texture_view),
1470                        }
1471                    }
1472                }
1473
1474                None => {
1475                    // Fill with a single fallback image.
1476                    texture_bindings.push(&*fallback_image.texture_view);
1477                }
1478            }
1479
1480            if let Some(required_binding_array_size) = required_binding_array_size {
1481                texture_bindings.extend(iter::repeat_n(
1482                    &*fallback_image.texture_view,
1483                    required_binding_array_size as usize - texture_bindings.len(),
1484                ));
1485            }
1486
1487            binding_resource_arrays.push((
1488                binding_number,
1489                BindingResourceArray::TextureViews(texture_bindings),
1490            ));
1491        }
1492    }
1493
1494    /// Accumulates buffer binding arrays into binding resource arrays suitable
1495    /// for `wgpu`.
1496    fn create_buffer_binding_resource_arrays<'a, 'b>(
1497        &'a self,
1498        binding_resource_arrays: &'b mut Vec<(&'a u32, BindingResourceArray<'a>)>,
1499        fallback_buffers: &'a HashMap<BindlessIndex, Buffer>,
1500        bindless_descriptor: &'a BindlessDescriptor,
1501        required_binding_array_size: Option<u32>,
1502    ) {
1503        for bindless_buffer_descriptor in bindless_descriptor.buffers.iter() {
1504            let Some(buffer_bindless_binding_array) =
1505                self.buffers.get(&bindless_buffer_descriptor.bindless_index)
1506            else {
1507                // This is OK, because index buffers are present in
1508                // `BindlessDescriptor::buffers` but not in
1509                // `BindlessDescriptor::resources`.
1510                continue;
1511            };
1512
1513            let fallback_buffer = fallback_buffers
1514                .get(&bindless_buffer_descriptor.bindless_index)
1515                .expect("Fallback buffer should exist");
1516
1517            let mut buffer_bindings: Vec<_> = buffer_bindless_binding_array
1518                .bindings
1519                .iter()
1520                .map(|maybe_bindless_binding| {
1521                    let buffer = match *maybe_bindless_binding {
1522                        None => fallback_buffer,
1523                        Some(ref bindless_binding) => &bindless_binding.resource,
1524                    };
1525                    BufferBinding {
1526                        buffer,
1527                        offset: 0,
1528                        size: None,
1529                    }
1530                })
1531                .collect();
1532
1533            if let Some(required_binding_array_size) = required_binding_array_size {
1534                buffer_bindings.extend(iter::repeat_n(
1535                    BufferBinding {
1536                        buffer: fallback_buffer,
1537                        offset: 0,
1538                        size: None,
1539                    },
1540                    required_binding_array_size as usize - buffer_bindings.len(),
1541                ));
1542            }
1543
1544            binding_resource_arrays.push((
1545                &*buffer_bindless_binding_array.binding_number,
1546                BindingResourceArray::Buffers(buffer_bindings),
1547            ));
1548        }
1549    }
1550
1551    /// Returns the [`BindGroup`] corresponding to this slab, if it's been
1552    /// prepared.
1553    fn bind_group(&self) -> Option<&BindGroup> {
1554        self.bind_group.as_ref()
1555    }
1556
1557    /// Returns the bindless index table containing the given bindless index.
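    ///
    /// The index tables are kept sorted by `index_range`, so this is a binary
    /// search for the table whose half-open range contains `bindless_index`,
    /// ordering each table relative to the target index. A small worked
    /// example with illustrative ranges:
    ///
    /// ```
    /// let index_ranges = [0u32..4, 4..9];
    /// let bindless_index = 5u32;
    /// let table = index_ranges.binary_search_by(|range| {
    ///     if bindless_index < range.start {
    ///         core::cmp::Ordering::Greater // this table lies above the index
    ///     } else if bindless_index >= range.end {
    ///         core::cmp::Ordering::Less // this table lies below the index
    ///     } else {
    ///         core::cmp::Ordering::Equal
    ///     }
    /// });
    /// assert_eq!(table, Ok(1));
    /// ```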
1558    fn get_bindless_index_table(
1559        &self,
1560        bindless_index: BindlessIndex,
1561    ) -> Option<&MaterialBindlessIndexTable> {
1562        let table_index = self
1563            .bindless_index_tables
1564            .binary_search_by(|bindless_index_table| {
1565                if bindless_index < bindless_index_table.index_range.start {
1566                    Ordering::Greater
1567                } else if bindless_index >= bindless_index_table.index_range.end {
1568                    Ordering::Less
1569                } else {
1570                    Ordering::Equal
1571                }
1572            })
1573            .ok()?;
1574        self.bindless_index_tables.get(table_index)
1575    }
1576}
1577
1578impl<R> MaterialBindlessBindingArray<R>
1579where
1580    R: GetBindingResourceId,
1581{
1582    /// Creates a new [`MaterialBindlessBindingArray`] with the given binding
1583    /// number, managing resources of the given type.
1584    fn new(
1585        binding_number: BindingNumber,
1586        resource_type: BindlessResourceType,
1587    ) -> MaterialBindlessBindingArray<R> {
1588        MaterialBindlessBindingArray {
1589            binding_number,
1590            bindings: vec![],
1591            resource_type,
1592            resource_to_slot: HashMap::default(),
1593            free_slots: vec![],
1594            len: 0,
1595        }
1596    }
1597
1598    /// Returns the slot corresponding to the given resource, if that resource
1599    /// is located in this binding array.
1600    ///
1601    /// If the resource isn't in this binding array, this method returns `None`.
1602    fn find(&self, binding_resource_id: BindingResourceId) -> Option<u32> {
1603        self.resource_to_slot.get(&binding_resource_id).copied()
1604    }
1605
1606    /// Inserts a bindless resource into a binding array and returns the index
1607    /// of the slot it was inserted into.
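    ///
    /// Inserting a resource that is already present bumps its reference count
    /// and returns the slot it already occupies. A hedged sketch (the
    /// identifiers below are illustrative, not real values):
    ///
    /// ```ignore
    /// let slot_a = array.insert(view_id, texture_view.clone()); // ref_count = 1
    /// let slot_b = array.insert(view_id, texture_view);         // ref_count = 2
    /// assert_eq!(slot_a, slot_b);
    /// assert!(!array.remove(slot_a)); // still referenced by one material
    /// assert!(array.remove(slot_a));  // last reference gone; the slot is freed
    /// ```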
1608    fn insert(&mut self, binding_resource_id: BindingResourceId, resource: R) -> u32 {
1609        match self.resource_to_slot.entry(binding_resource_id) {
1610            bevy_platform::collections::hash_map::Entry::Occupied(o) => {
1611                let slot = *o.get();
1612
1613                self.bindings[slot as usize]
1614                    .as_mut()
1615                    .expect("A slot in the resource_to_slot map should have a value")
1616                    .ref_count += 1;
1617
1618                slot
1619            }
1620            bevy_platform::collections::hash_map::Entry::Vacant(v) => {
1621                let slot = self.free_slots.pop().unwrap_or(self.len);
1622                v.insert(slot);
1623
1624                if self.bindings.len() < slot as usize + 1 {
1625                    self.bindings.resize_with(slot as usize + 1, || None);
1626                }
1627                self.bindings[slot as usize] = Some(MaterialBindlessBinding::new(resource));
1628
1629                self.len += 1;
1630                slot
1631            }
1632        }
1633    }
1634
1635    /// Removes a reference to the object in the given slot.
1636    ///
1637    /// Returns true if the reference count drops to zero and the object is
1638    /// freed, or false if the object is still referenced after the count is
1639    /// decremented.
1640    fn remove(&mut self, slot: u32) -> bool {
1641        let maybe_binding = &mut self.bindings[slot as usize];
1642        let binding = maybe_binding
1643            .as_mut()
1644            .expect("Attempted to free an already-freed binding");
1645
1646        binding.ref_count -= 1;
1647        if binding.ref_count != 0 {
1648            return false;
1649        }
1650
1651        let binding_resource_id = binding.resource.binding_resource_id(self.resource_type);
1652        self.resource_to_slot.remove(&binding_resource_id);
1653
1654        *maybe_binding = None;
1655        self.free_slots.push(slot);
1656        self.len -= 1;
1657        true
1658    }
1659}
1660
1661impl<R> MaterialBindlessBinding<R>
1662where
1663    R: GetBindingResourceId,
1664{
1665    /// Creates a new [`MaterialBindlessBinding`] for a freshly-added resource.
1666    ///
1667    /// The reference count is initialized to 1.
1668    fn new(resource: R) -> MaterialBindlessBinding<R> {
1669        MaterialBindlessBinding {
1670            resource,
1671            ref_count: 1,
1672        }
1673    }
1674}
1675
1676/// Returns true if the material will *actually* use bindless resources, or
1677/// false if it won't.
1678///
1679/// This takes the platform support (or lack thereof) for bindless resources
1680/// into account.
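///
/// A hedged usage sketch, assuming a `StandardMaterial`-like material type and
/// access to the render world's `RenderDevice`:
///
/// ```ignore
/// if material_uses_bindless_resources::<StandardMaterial>(&render_device) {
///     // Take the bindless allocator path.
/// } else {
///     // Fall back to one bind group per material instance.
/// }
/// ```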
1681pub fn material_uses_bindless_resources<M>(render_device: &RenderDevice) -> bool
1682where
1683    M: Material,
1684{
1685    M::bindless_slot_count().is_some_and(|bindless_slot_count| {
1686        M::bindless_supported(render_device) && bindless_slot_count.resolve() > 1
1687    })
1688}
1689
1690impl MaterialBindlessSlab {
1691    /// Creates a new [`MaterialBindlessSlab`] for a material with the given
1692    /// bindless descriptor.
1693    ///
1694    /// We use this when no existing slab could hold a material to be allocated.
1695    fn new(bindless_descriptor: &BindlessDescriptor) -> MaterialBindlessSlab {
1696        let mut buffers = HashMap::default();
1697        let mut samplers = HashMap::default();
1698        let mut textures = HashMap::default();
1699        let mut data_buffers = HashMap::default();
1700
1701        for (bindless_index, bindless_resource_type) in
1702            bindless_descriptor.resources.iter().enumerate()
1703        {
1704            let bindless_index = BindlessIndex(bindless_index as u32);
1705            match *bindless_resource_type {
1706                BindlessResourceType::None => {}
1707                BindlessResourceType::Buffer => {
1708                    let binding_number = bindless_descriptor
1709                        .buffers
1710                        .iter()
1711                        .find(|bindless_buffer_descriptor| {
1712                            bindless_buffer_descriptor.bindless_index == bindless_index
1713                        })
1714                        .expect(
1715                            "Bindless buffer descriptor matching that bindless index should be \
1716                             present",
1717                        )
1718                        .binding_number;
1719                    buffers.insert(
1720                        bindless_index,
1721                        MaterialBindlessBindingArray::new(binding_number, *bindless_resource_type),
1722                    );
1723                }
1724                BindlessResourceType::DataBuffer => {
1725                    // Set up the buffer that will hold plain-old-data material fields.
1726                    let buffer_descriptor = bindless_descriptor
1727                        .buffers
1728                        .iter()
1729                        .find(|bindless_buffer_descriptor| {
1730                            bindless_buffer_descriptor.bindless_index == bindless_index
1731                        })
1732                        .expect(
1733                            "Bindless buffer descriptor matching that bindless index should be \
1734                             present",
1735                        );
1736                    data_buffers.insert(
1737                        bindless_index,
1738                        MaterialDataBuffer::new(
1739                            buffer_descriptor.binding_number,
1740                            buffer_descriptor
1741                                .size
1742                                .expect("Data buffers should have a size")
1743                                as u32,
1744                        ),
1745                    );
1746                }
1747                BindlessResourceType::SamplerFiltering
1748                | BindlessResourceType::SamplerNonFiltering
1749                | BindlessResourceType::SamplerComparison => {
1750                    samplers.insert(
1751                        *bindless_resource_type,
1752                        MaterialBindlessBindingArray::new(
1753                            *bindless_resource_type.binding_number().unwrap(),
1754                            *bindless_resource_type,
1755                        ),
1756                    );
1757                }
1758                BindlessResourceType::Texture1d
1759                | BindlessResourceType::Texture2d
1760                | BindlessResourceType::Texture2dArray
1761                | BindlessResourceType::Texture3d
1762                | BindlessResourceType::TextureCube
1763                | BindlessResourceType::TextureCubeArray => {
1764                    textures.insert(
1765                        *bindless_resource_type,
1766                        MaterialBindlessBindingArray::new(
1767                            *bindless_resource_type.binding_number().unwrap(),
1768                            *bindless_resource_type,
1769                        ),
1770                    );
1771                }
1772            }
1773        }
1774
1775        let bindless_index_tables = bindless_descriptor
1776            .index_tables
1777            .iter()
1778            .map(MaterialBindlessIndexTable::new)
1779            .collect();
1780
1781        MaterialBindlessSlab {
1782            bind_group: None,
1783            bindless_index_tables,
1784            samplers,
1785            textures,
1786            buffers,
1787            data_buffers,
1788            free_slots: vec![],
1789            live_allocation_count: 0,
1790            allocated_resource_count: 0,
1791        }
1792    }
1793}
1794
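/// Creates the [`FallbackBindlessResources`] resource, which holds the
/// fallback samplers used to fill empty slots in bindless sampler binding
/// arrays.
///
/// A hedged sketch of registering this system in a render-app plugin; the
/// schedule label below is an assumption, not taken from this file:
///
/// ```ignore
/// render_app.add_systems(RenderStartup, init_fallback_bindless_resources);
/// ```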
1795pub fn init_fallback_bindless_resources(mut commands: Commands, render_device: Res<RenderDevice>) {
1796    commands.insert_resource(FallbackBindlessResources {
1797        filtering_sampler: render_device.create_sampler(&SamplerDescriptor {
1798            label: Some("fallback filtering sampler"),
1799            ..default()
1800        }),
1801        non_filtering_sampler: render_device.create_sampler(&SamplerDescriptor {
1802            label: Some("fallback non-filtering sampler"),
1803            mag_filter: FilterMode::Nearest,
1804            min_filter: FilterMode::Nearest,
1805            mipmap_filter: FilterMode::Nearest,
1806            ..default()
1807        }),
1808        comparison_sampler: render_device.create_sampler(&SamplerDescriptor {
1809            label: Some("fallback comparison sampler"),
1810            compare: Some(CompareFunction::Always),
1811            ..default()
1812        }),
1813    });
1814}
1815
1816impl MaterialBindGroupNonBindlessAllocator {
1817    /// Creates a new [`MaterialBindGroupNonBindlessAllocator`] managing the
1818    /// bind groups for a single non-bindless material.
1819    fn new(label: &'static str) -> MaterialBindGroupNonBindlessAllocator {
1820        MaterialBindGroupNonBindlessAllocator {
1821            label,
1822            bind_groups: vec![],
1823            to_prepare: HashSet::default(),
1824            free_indices: vec![],
1825        }
1826    }
1827
1828    /// Inserts a bind group, either unprepared or prepared, into this allocator
1829    /// and returns a [`MaterialBindingId`].
1830    ///
1831    /// The returned [`MaterialBindingId`] can later be used to fetch the bind
1832    /// group.
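    ///
    /// For non-bindless materials, only the group index varies; the slot is
    /// always zero. A hedged sketch (the bind group value is illustrative):
    ///
    /// ```ignore
    /// let id = allocator.allocate_prepared(prepared_bind_group);
    /// assert_eq!(id.slot, MaterialBindGroupSlot(0));
    /// ```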
1833    fn allocate(&mut self, bind_group: MaterialNonBindlessAllocatedBindGroup) -> MaterialBindingId {
1834        let group_id = self
1835            .free_indices
1836            .pop()
1837            .unwrap_or(MaterialBindGroupIndex(self.bind_groups.len() as u32));
1838        if self.bind_groups.len() < *group_id as usize + 1 {
1839            self.bind_groups
1840                .resize_with(*group_id as usize + 1, || None);
1841        }
1842
1843        if matches!(
1844            bind_group,
1845            MaterialNonBindlessAllocatedBindGroup::Unprepared { .. }
1846        ) {
1847            self.to_prepare.insert(group_id);
1848        }
1849
1850        self.bind_groups[*group_id as usize] = Some(bind_group);
1851
1852        MaterialBindingId {
1853            group: group_id,
1854            slot: default(),
1855        }
1856    }
1857
1858    /// Inserts an unprepared bind group into this allocator and returns a
1859    /// [`MaterialBindingId`].
1860    fn allocate_unprepared(
1861        &mut self,
1862        unprepared_bind_group: UnpreparedBindGroup,
1863        bind_group_layout: BindGroupLayoutDescriptor,
1864    ) -> MaterialBindingId {
1865        self.allocate(MaterialNonBindlessAllocatedBindGroup::Unprepared {
1866            bind_group: unprepared_bind_group,
1867            layout: bind_group_layout,
1868        })
1869    }
1870
1871    /// Inserts a prepared bind group into this allocator and returns a
1872    /// [`MaterialBindingId`].
1873    fn allocate_prepared(&mut self, prepared_bind_group: PreparedBindGroup) -> MaterialBindingId {
1874        self.allocate(MaterialNonBindlessAllocatedBindGroup::Prepared {
1875            bind_group: prepared_bind_group,
1876            uniform_buffers: vec![],
1877        })
1878    }
1879
1880    /// Deallocates the bind group with the given binding ID.
1881    fn free(&mut self, binding_id: MaterialBindingId) {
1882        debug_assert_eq!(binding_id.slot, MaterialBindGroupSlot(0));
1883        debug_assert!(self.bind_groups[*binding_id.group as usize].is_some());
1884        self.bind_groups[*binding_id.group as usize] = None;
1885        self.to_prepare.remove(&binding_id.group);
1886        self.free_indices.push(binding_id.group);
1887    }
1888
1889    /// Returns a wrapper around the bind group with the given index.
1890    fn get(&self, group: MaterialBindGroupIndex) -> Option<MaterialNonBindlessSlab<'_>> {
1891        self.bind_groups[group.0 as usize]
1892            .as_ref()
1893            .map(|bind_group| match bind_group {
1894                MaterialNonBindlessAllocatedBindGroup::Prepared { bind_group, .. } => {
1895                    MaterialNonBindlessSlab::Prepared(bind_group)
1896                }
1897                MaterialNonBindlessAllocatedBindGroup::Unprepared { .. } => {
1898                    MaterialNonBindlessSlab::Unprepared
1899                }
1900            })
1901    }
1902
1903    /// Prepares any as-yet unprepared bind groups that this allocator is
1904    /// managing.
1905    ///
1906    /// Unprepared bind groups can be added to this allocator with
1907    /// [`Self::allocate_unprepared`]. Such bind groups will defer being
1908    /// prepared until the next time this method is called.
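    ///
    /// A hedged sketch of the intended flow (variable names are illustrative):
    ///
    /// ```ignore
    /// // When the material is queued, only the unprepared bind group is recorded.
    /// let id = allocator.allocate_unprepared(unprepared_bind_group, layout);
    /// // Later, in the prepare phase, the actual wgpu bind groups are built.
    /// allocator.prepare_bind_groups(&render_device, &pipeline_cache);
    /// ```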
1909    fn prepare_bind_groups(
1910        &mut self,
1911        render_device: &RenderDevice,
1912        pipeline_cache: &PipelineCache,
1913    ) {
1914        for bind_group_index in mem::take(&mut self.to_prepare) {
1915            let Some(MaterialNonBindlessAllocatedBindGroup::Unprepared {
1916                bind_group: unprepared_bind_group,
1917                layout: bind_group_layout,
1918            }) = mem::take(&mut self.bind_groups[*bind_group_index as usize])
1919            else {
1920                panic!("Allocation didn't exist or was already prepared");
1921            };
1922
1923            // Pack any `Data` into uniform buffers.
1924            let mut uniform_buffers = vec![];
1925            for (index, binding) in unprepared_bind_group.bindings.iter() {
1926                let OwnedBindingResource::Data(ref owned_data) = *binding else {
1927                    continue;
1928                };
1929                let label = format!("material uniform data {}", *index);
1930                let uniform_buffer = render_device.create_buffer_with_data(&BufferInitDescriptor {
1931                    label: Some(&label),
1932                    contents: &owned_data.0,
1933                    usage: BufferUsages::COPY_DST | BufferUsages::UNIFORM,
1934                });
1935                uniform_buffers.push(uniform_buffer);
1936            }
1937
1938            // Create bind group entries.
1939            let mut bind_group_entries = vec![];
1940            let mut uniform_buffers_iter = uniform_buffers.iter();
1941            for (index, binding) in unprepared_bind_group.bindings.iter() {
1942                match *binding {
1943                    OwnedBindingResource::Data(_) => {
1944                        bind_group_entries.push(BindGroupEntry {
1945                            binding: *index,
1946                            resource: uniform_buffers_iter
1947                                .next()
1948                                .expect("We should have created uniform buffers for each `Data`")
1949                                .as_entire_binding(),
1950                        });
1951                    }
1952                    _ => bind_group_entries.push(BindGroupEntry {
1953                        binding: *index,
1954                        resource: binding.get_binding(),
1955                    }),
1956                }
1957            }
1958
1959            // Create the bind group.
1960            let bind_group = render_device.create_bind_group(
1961                self.label,
1962                &pipeline_cache.get_bind_group_layout(&bind_group_layout),
1963                &bind_group_entries,
1964            );
1965
1966            self.bind_groups[*bind_group_index as usize] =
1967                Some(MaterialNonBindlessAllocatedBindGroup::Prepared {
1968                    bind_group: PreparedBindGroup {
1969                        bindings: unprepared_bind_group.bindings,
1970                        bind_group,
1971                    },
1972                    uniform_buffers,
1973                });
1974        }
1975    }
1976}
1977
1978impl<'a> MaterialSlab<'a> {
1979    /// Returns the [`BindGroup`] corresponding to this slab, if it's been
1980    /// prepared.
1981    ///
1982    /// You can prepare bind groups by calling
1983    /// [`MaterialBindGroupAllocator::prepare_bind_groups`]. If the bind group
1984    /// isn't ready, this method returns `None`.
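    ///
    /// A hedged sketch of how render code might consume this (the allocator
    /// lookup is assumed, not shown in this excerpt):
    ///
    /// ```ignore
    /// if let Some(slab) = allocator.get(material_binding_id.group) {
    ///     if let Some(bind_group) = slab.bind_group() {
    ///         // 2 is an illustrative bind group index for the material group.
    ///         pass.set_bind_group(2, bind_group, &[]);
    ///     }
    /// }
    /// ```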
1985    pub fn bind_group(&self) -> Option<&'a BindGroup> {
1986        match self.0 {
1987            MaterialSlabImpl::Bindless(material_bindless_slab) => {
1988                material_bindless_slab.bind_group()
1989            }
1990            MaterialSlabImpl::NonBindless(MaterialNonBindlessSlab::Prepared(
1991                prepared_bind_group,
1992            )) => Some(&prepared_bind_group.bind_group),
1993            MaterialSlabImpl::NonBindless(MaterialNonBindlessSlab::Unprepared) => None,
1994        }
1995    }
1996}
1997
1998impl MaterialDataBuffer {
1999    /// Creates a new [`MaterialDataBuffer`] managing a buffer of elements of
2000    /// size `aligned_element_size` that will be bound to the given binding
2001    /// number.
2002    fn new(binding_number: BindingNumber, aligned_element_size: u32) -> MaterialDataBuffer {
2003        MaterialDataBuffer {
2004            binding_number,
2005            buffer: RetainedRawBufferVec::new(BufferUsages::STORAGE),
2006            aligned_element_size,
2007            free_slots: vec![],
2008            len: 0,
2009        }
2010    }
2011
2012    /// Allocates a slot for a new piece of data, copies the data into that
2013    /// slot, and returns the slot ID.
2014    ///
2015    /// The size of the piece of data supplied to this method must equal the
2016    /// [`Self::aligned_element_size`] provided to [`MaterialDataBuffer::new`].
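    ///
    /// A small worked example of the slot arithmetic used below (hypothetical
    /// sizes): with an aligned element size of 256 bytes, slot 2 occupies
    /// bytes `512..768` of the backing buffer.
    ///
    /// ```
    /// let aligned_element_size = 256usize;
    /// let slot = 2usize;
    /// let start = slot * aligned_element_size;
    /// let end = (slot + 1) * aligned_element_size;
    /// assert_eq!((start, end), (512, 768));
    /// ```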
2017    fn insert(&mut self, data: &[u8]) -> u32 {
2018        // Make sure the data is of the right length.
2019        debug_assert_eq!(data.len(), self.aligned_element_size as usize);
2020
2021        // Grab a slot.
2022        let slot = self.free_slots.pop().unwrap_or(self.len);
2023
2024        // Calculate the range we're going to copy to.
2025        let start = slot as usize * self.aligned_element_size as usize;
2026        let end = (slot as usize + 1) * self.aligned_element_size as usize;
2027
2028        // Resize the buffer if necessary.
2029        if self.buffer.len() < end {
2030            self.buffer.reserve_internal(end);
2031        }
2032        while self.buffer.values().len() < end {
2033            self.buffer.push(0);
2034        }
2035
2036        // Copy in the data.
2037        self.buffer.values_mut()[start..end].copy_from_slice(data);
2038
2039        // Mark the buffer dirty, and finish up.
2040        self.len += 1;
2041        self.buffer.dirty = BufferDirtyState::NeedsReserve;
2042        slot
2043    }
2044
2045    /// Marks the given slot as free.
2046    fn remove(&mut self, slot: u32) {
2047        self.free_slots.push(slot);
2048        self.len -= 1;
2049    }
2050}