bevy_render/mesh/allocator.rs

//! Manages mesh vertex and index buffers.

use alloc::vec::Vec;
use bevy_mesh::Indices;
use core::{
    fmt::{self, Display, Formatter},
    ops::Range,
};
use nonmax::NonMaxU32;

use bevy_app::{App, Plugin};
use bevy_asset::AssetId;
use bevy_derive::{Deref, DerefMut};
use bevy_ecs::{
    resource::Resource,
    schedule::IntoScheduleConfigs as _,
    system::{Res, ResMut},
    world::{FromWorld, World},
};
use bevy_platform::collections::{hash_map::Entry, HashMap, HashSet};
use bevy_utils::default;
use offset_allocator::{Allocation, Allocator};
use tracing::error;
use wgpu::{
    BufferDescriptor, BufferSize, BufferUsages, CommandEncoderDescriptor, DownlevelFlags,
    COPY_BUFFER_ALIGNMENT,
};

use crate::{
    mesh::{Mesh, MeshVertexBufferLayouts, RenderMesh},
    render_asset::{prepare_assets, ExtractedAssets},
    render_resource::Buffer,
    renderer::{RenderAdapter, RenderDevice, RenderQueue},
    Render, RenderApp, RenderSystems,
};

/// A plugin that manages GPU memory for mesh data.
pub struct MeshAllocatorPlugin;

/// Manages the assignment of mesh data to GPU buffers.
///
/// The Bevy renderer tries to pack vertex and index data for multiple meshes
/// together so that multiple meshes can be drawn back-to-back without any
/// rebinding. This resource manages these buffers.
///
/// Within each slab, or hardware buffer, the underlying allocation algorithm is
/// [`offset_allocator`], a Rust port of Sebastian Aaltonen's hard-real-time C++
/// `OffsetAllocator`. Slabs start small and then grow as their contents fill
/// up, up to a maximum size limit. To reduce fragmentation, vertex and index
/// buffers that are too large bypass this system and receive their own buffers.
///
/// The [`MeshAllocatorSettings`] allows you to tune the behavior of the
/// allocator for better performance with your application. Most applications
/// won't need to change the settings from their default values.
#[derive(Resource)]
pub struct MeshAllocator {
    /// Holds all buffers and allocators.
    slabs: HashMap<SlabId, Slab>,

    /// Maps a layout to the slabs that hold elements of that layout.
    ///
    /// This is used when allocating, so that we can find the appropriate slab
    /// to place an object in.
    slab_layouts: HashMap<ElementLayout, Vec<SlabId>>,

    /// Maps mesh asset IDs to the ID of the slabs that hold their vertex data.
    mesh_id_to_vertex_slab: HashMap<AssetId<Mesh>, SlabId>,

    /// Maps mesh asset IDs to the ID of the slabs that hold their index data.
    mesh_id_to_index_slab: HashMap<AssetId<Mesh>, SlabId>,

    /// The next slab ID to assign.
    next_slab_id: SlabId,

    /// Whether we can pack multiple vertex arrays into a single slab on this
    /// platform.
    ///
    /// This corresponds to [`DownlevelFlags::BASE_VERTEX`], which is unset on
    /// WebGL 2. On this platform, we must give each vertex array its own
    /// buffer, because we can't adjust the first vertex when we perform a draw.
    general_vertex_slabs_supported: bool,

    /// Additional buffer usages to add to any vertex or index buffers created.
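    ///
    /// For example, a rendering feature that needs to read packed mesh data
    /// from a compute shader could request storage access (a sketch; how you
    /// obtain the [`MeshAllocator`] resource depends on your setup):
    ///
    /// ```ignore
    /// mesh_allocator.extra_buffer_usages |= BufferUsages::STORAGE;
    /// ```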
    pub extra_buffer_usages: BufferUsages,
}

/// Tunable parameters that customize the behavior of the allocator.
///
/// Generally, these parameters adjust the tradeoff between memory fragmentation
/// and performance. You can adjust them as desired for your application. Most
/// applications can stick with the default values.
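///
/// A minimal sketch of overriding the defaults in the render sub-app (`app` is
/// your [`App`]; whether the render sub-app already exists at this point
/// depends on your plugin setup):
///
/// ```ignore
/// app.sub_app_mut(RenderApp)
///     .insert_resource(MeshAllocatorSettings {
///         // Give meshes their own buffers once their data exceeds 64 MiB.
///         large_threshold: 1024 * 1024 * 64,
///         ..Default::default()
///     });
/// ```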
#[derive(Resource)]
pub struct MeshAllocatorSettings {
    /// The minimum size of a slab (hardware buffer), in bytes.
    ///
    /// The default value is 1 MiB.
    pub min_slab_size: u64,

    /// The maximum size of a slab (hardware buffer), in bytes.
    ///
    /// When a slab reaches this limit, a new slab is created.
    ///
    /// The default value is 512 MiB.
    pub max_slab_size: u64,

    /// The maximum size of vertex or index data that can be placed in a general
    /// slab, in bytes.
    ///
    /// If a mesh has vertex or index data that exceeds this size limit, that
    /// data is placed in its own slab. This reduces fragmentation, but incurs
    /// more CPU-side binding overhead when drawing the mesh.
    ///
    /// The default value is 256 MiB.
    pub large_threshold: u64,

    /// The factor by which we scale a slab when growing it.
    ///
    /// This value must be greater than 1. Higher values result in more
    /// fragmentation but fewer expensive copy operations when growing the
    /// buffer.
    ///
    /// The default value is 1.5.
    pub growth_factor: f64,
}

impl Default for MeshAllocatorSettings {
    fn default() -> Self {
        Self {
            // 1 MiB
            min_slab_size: 1024 * 1024,
            // 512 MiB
            max_slab_size: 1024 * 1024 * 512,
            // 256 MiB
            large_threshold: 1024 * 1024 * 256,
            // 1.5× growth
            growth_factor: 1.5,
        }
    }
}

/// The hardware buffer that mesh data lives in, as well as the range within
/// that buffer.
pub struct MeshBufferSlice<'a> {
    /// The buffer that the mesh data resides in.
    pub buffer: &'a Buffer,

    /// The range of elements within this buffer that the mesh data resides in,
    /// measured in elements.
    ///
    /// This is not a byte range; it's an element range. For vertex data, this
    /// is measured in increments of a single vertex. (Thus, if a vertex is 32
    /// bytes long, then this range is in units of 32 bytes each.) For index
    /// data, this is measured in increments of a single index value (2 or 4
    /// bytes). Draw commands generally take their ranges in elements, not
    /// bytes, so this is the most convenient unit in this case.
    pub range: Range<u32>,
}

/// The index of a single slab.
#[derive(Clone, Copy, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
#[repr(transparent)]
pub struct SlabId(pub NonMaxU32);

/// Data for a single slab.
#[expect(
    clippy::large_enum_variant,
    reason = "See https://github.com/bevyengine/bevy/issues/19220"
)]
enum Slab {
    /// A slab that can contain multiple objects.
    General(GeneralSlab),
    /// A slab that contains a single object.
    LargeObject(LargeObjectSlab),
}

/// A resizable slab that can contain multiple objects.
///
/// This is the normal type of slab used for objects that are below the
/// [`MeshAllocatorSettings::large_threshold`]. Slabs are divided into *slots*,
/// which are described in detail in the [`ElementLayout`] documentation.
struct GeneralSlab {
    /// The [`Allocator`] that manages the objects in this slab.
    allocator: Allocator,

    /// The GPU buffer that backs this slab.
    ///
    /// This may be `None` if the buffer hasn't been created yet. We delay
    /// creating buffers until we've allocated all the meshes for the frame, so
    /// that we don't needlessly create and resize buffers when many meshes
    /// load all at once.
    buffer: Option<Buffer>,

    /// Allocations that are on the GPU.
    ///
    /// The range is in slots.
    resident_allocations: HashMap<AssetId<Mesh>, SlabAllocation>,

    /// Allocations that are waiting to be uploaded to the GPU.
    ///
    /// The range is in slots.
    pending_allocations: HashMap<AssetId<Mesh>, SlabAllocation>,

    /// The layout of a single element (vertex or index).
    element_layout: ElementLayout,

    /// The size of this slab in slots.
    current_slot_capacity: u32,
}

/// A slab that contains a single object.
///
/// Typically, this is for objects that exceed the
/// [`MeshAllocatorSettings::large_threshold`]. It's also used for objects that
/// would ordinarily be packed into a general slab but can't be because of
/// platform limitations, most notably vertex arrays on WebGL 2.
struct LargeObjectSlab {
    /// The GPU buffer that backs this slab.
    ///
    /// This may be `None` if the buffer hasn't been created yet.
    buffer: Option<Buffer>,

    /// The layout of a single element (vertex or index).
    element_layout: ElementLayout,
}

/// The type of element that a slab can store.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
enum ElementClass {
    /// Data for a vertex.
    Vertex,
    /// A vertex index.
    Index,
}

/// The result of [`GeneralSlab::grow_if_necessary`].
enum SlabGrowthResult {
    /// The mesh data already fits in the slab; the slab doesn't need to grow.
    NoGrowthNeeded,
    /// The slab needed to grow.
    ///
    /// The [`SlabToReallocate`] contains the old capacity of the slab.
    NeededGrowth(SlabToReallocate),
    /// The slab wanted to grow but couldn't because it hit its maximum size.
    CantGrow,
}

/// Information about the size of individual elements (vertices or indices)
/// within a slab.
///
/// Slab objects are allocated in units of *slots*. Usually, each element takes
/// up one slot, and so elements and slots are equivalent. Occasionally,
/// however, a slot may consist of 2 or even 4 elements. This occurs when the
/// size of an element isn't divisible by [`COPY_BUFFER_ALIGNMENT`]. When we
/// resize buffers, we perform GPU-to-GPU copies to shuffle the existing
/// elements into their new positions, and such copies must be on
/// [`COPY_BUFFER_ALIGNMENT`] boundaries. Slots solve this problem by
/// guaranteeing that the size of an allocation quantum is divisible by both the
/// size of an element and [`COPY_BUFFER_ALIGNMENT`], so we can relocate it
/// freely.
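///
/// For example, `u16` indices are 2 bytes each, so two of them make up one
/// 4-byte slot, while a 32-byte vertex (whose size is already a multiple of
/// [`COPY_BUFFER_ALIGNMENT`]) occupies a slot on its own.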
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
struct ElementLayout {
    /// Either a vertex or an index.
    class: ElementClass,

    /// The size in bytes of a single element (vertex or index).
    size: u64,

    /// The number of elements that make up a single slot.
    ///
    /// Usually, this is 1, but it can be different if [`ElementLayout::size`]
    /// isn't divisible by 4. See the comment in [`ElementLayout`] for more
    /// details.
    elements_per_slot: u32,
}

/// The location of an allocation and the slab it's contained in.
struct MeshAllocation {
    /// The ID of the slab.
    slab_id: SlabId,
    /// Holds the actual allocation.
    slab_allocation: SlabAllocation,
}

/// An allocation within a slab.
#[derive(Clone)]
struct SlabAllocation {
    /// The actual [`Allocator`] handle, needed to free the allocation.
    allocation: Allocation,
    /// The number of slots that this allocation takes up.
    slot_count: u32,
}

/// Holds information about all slabs scheduled to be allocated or reallocated.
#[derive(Default, Deref, DerefMut)]
struct SlabsToReallocate(HashMap<SlabId, SlabToReallocate>);

/// Holds information about a slab that's scheduled to be allocated or
/// reallocated.
#[derive(Default)]
struct SlabToReallocate {
    /// The capacity of the slab before we decided to grow it.
    old_slot_capacity: u32,
}

impl Display for SlabId {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        self.0.fmt(f)
    }
}

impl Plugin for MeshAllocatorPlugin {
    fn build(&self, app: &mut App) {
        let Some(render_app) = app.get_sub_app_mut(RenderApp) else {
            return;
        };

        render_app
            .init_resource::<MeshAllocatorSettings>()
            .add_systems(
                Render,
                allocate_and_free_meshes
                    .in_set(RenderSystems::PrepareAssets)
                    .before(prepare_assets::<RenderMesh>),
            );
    }

    fn finish(&self, app: &mut App) {
        let Some(render_app) = app.get_sub_app_mut(RenderApp) else {
            return;
        };

        // The `RenderAdapter` isn't available until now, so we can't do this in
        // [`Plugin::build`].
        render_app.init_resource::<MeshAllocator>();
    }
}

impl FromWorld for MeshAllocator {
    fn from_world(world: &mut World) -> Self {
        // Note whether we're on WebGL 2. If so, we must give every vertex
        // array its own slab.
        let render_adapter = world.resource::<RenderAdapter>();
        let general_vertex_slabs_supported = render_adapter
            .get_downlevel_capabilities()
            .flags
            .contains(DownlevelFlags::BASE_VERTEX);

        Self {
            slabs: HashMap::default(),
            slab_layouts: HashMap::default(),
            mesh_id_to_vertex_slab: HashMap::default(),
            mesh_id_to_index_slab: HashMap::default(),
            next_slab_id: default(),
            general_vertex_slabs_supported,
            extra_buffer_usages: BufferUsages::empty(),
        }
    }
}

/// A system that processes newly-extracted or newly-removed meshes and writes
/// their data into buffers or frees their data as appropriate.
pub fn allocate_and_free_meshes(
    mut mesh_allocator: ResMut<MeshAllocator>,
    mesh_allocator_settings: Res<MeshAllocatorSettings>,
    extracted_meshes: Res<ExtractedAssets<RenderMesh>>,
    mut mesh_vertex_buffer_layouts: ResMut<MeshVertexBufferLayouts>,
    render_device: Res<RenderDevice>,
    render_queue: Res<RenderQueue>,
) {
    // Process removed or modified meshes.
    mesh_allocator.free_meshes(&extracted_meshes);

    // Process newly-added or modified meshes.
    mesh_allocator.allocate_meshes(
        &mesh_allocator_settings,
        &extracted_meshes,
        &mut mesh_vertex_buffer_layouts,
        &render_device,
        &render_queue,
    );
}

impl MeshAllocator {
    /// Returns the buffer and range within that buffer of the vertex data for
    /// the mesh with the given ID.
    ///
    /// If the mesh wasn't allocated, returns None.
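    ///
    /// The returned range is in vertices, which is what draw calls expect. A
    /// rough sketch of how custom draw code might use it (`render_pass` and
    /// `mesh_id` are illustrative, not part of this API):
    ///
    /// ```ignore
    /// if let Some(slice) = mesh_allocator.mesh_vertex_slice(&mesh_id) {
    ///     render_pass.set_vertex_buffer(0, slice.buffer.slice(..));
    ///     // Draw one instance using the mesh's vertex range within the slab.
    ///     render_pass.draw(slice.range.clone(), 0..1);
    /// }
    /// ```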
    pub fn mesh_vertex_slice(&self, mesh_id: &AssetId<Mesh>) -> Option<MeshBufferSlice<'_>> {
        self.mesh_slice_in_slab(mesh_id, *self.mesh_id_to_vertex_slab.get(mesh_id)?)
    }

    /// Returns the buffer and range within that buffer of the index data for
    /// the mesh with the given ID.
    ///
    /// If the mesh has no index data or wasn't allocated, returns None.
    pub fn mesh_index_slice(&self, mesh_id: &AssetId<Mesh>) -> Option<MeshBufferSlice<'_>> {
        self.mesh_slice_in_slab(mesh_id, *self.mesh_id_to_index_slab.get(mesh_id)?)
    }

    /// Returns the IDs of the vertex buffer and index buffer respectively for
    /// the mesh with the given ID.
    ///
    /// If the mesh wasn't allocated, or has no index data in the case of the
    /// index buffer, the corresponding element in the returned tuple will be
    /// None.
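    ///
    /// Because meshes that share a slab can be drawn without rebinding vertex
    /// or index buffers, comparing slab IDs is a cheap way to decide whether
    /// two meshes can be drawn back-to-back. A sketch (`mesh_a` and `mesh_b`
    /// are illustrative):
    ///
    /// ```ignore
    /// let (vertex_slab_a, _) = mesh_allocator.mesh_slabs(&mesh_a);
    /// let (vertex_slab_b, _) = mesh_allocator.mesh_slabs(&mesh_b);
    /// let share_vertex_buffer = vertex_slab_a.is_some() && vertex_slab_a == vertex_slab_b;
    /// ```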
    pub fn mesh_slabs(&self, mesh_id: &AssetId<Mesh>) -> (Option<SlabId>, Option<SlabId>) {
        (
            self.mesh_id_to_vertex_slab.get(mesh_id).cloned(),
            self.mesh_id_to_index_slab.get(mesh_id).cloned(),
        )
    }

    /// Given a slab and a mesh with data located within it, returns the buffer
    /// and range of that mesh data within the slab.
    fn mesh_slice_in_slab(
        &self,
        mesh_id: &AssetId<Mesh>,
        slab_id: SlabId,
    ) -> Option<MeshBufferSlice<'_>> {
        match self.slabs.get(&slab_id)? {
            Slab::General(general_slab) => {
                let slab_allocation = general_slab.resident_allocations.get(mesh_id)?;
                Some(MeshBufferSlice {
                    buffer: general_slab.buffer.as_ref()?,
                    range: (slab_allocation.allocation.offset
                        * general_slab.element_layout.elements_per_slot)
                        ..((slab_allocation.allocation.offset + slab_allocation.slot_count)
                            * general_slab.element_layout.elements_per_slot),
                })
            }

            Slab::LargeObject(large_object_slab) => {
                let buffer = large_object_slab.buffer.as_ref()?;
                Some(MeshBufferSlice {
                    buffer,
                    range: 0..((buffer.size() / large_object_slab.element_layout.size) as u32),
                })
            }
        }
    }

    /// Processes newly-loaded meshes, allocating room in the slabs for their
    /// mesh data and performing upload operations as appropriate.
    fn allocate_meshes(
        &mut self,
        mesh_allocator_settings: &MeshAllocatorSettings,
        extracted_meshes: &ExtractedAssets<RenderMesh>,
        mesh_vertex_buffer_layouts: &mut MeshVertexBufferLayouts,
        render_device: &RenderDevice,
        render_queue: &RenderQueue,
    ) {
        let mut slabs_to_grow = SlabsToReallocate::default();

        // Allocate.
        for (mesh_id, mesh) in &extracted_meshes.extracted {
            let vertex_buffer_size = mesh.get_vertex_buffer_size() as u64;
            if vertex_buffer_size == 0 {
                continue;
            }
            // Allocate vertex data. Note that we can only pack mesh vertex data
            // together if the platform supports it.
            let vertex_element_layout = ElementLayout::vertex(mesh_vertex_buffer_layouts, mesh);
            if self.general_vertex_slabs_supported {
                self.allocate(
                    mesh_id,
                    vertex_buffer_size,
                    vertex_element_layout,
                    &mut slabs_to_grow,
                    mesh_allocator_settings,
                );
            } else {
                self.allocate_large(mesh_id, vertex_element_layout);
            }

            // Allocate index data.
            if let (Some(index_buffer_data), Some(index_element_layout)) =
                (mesh.get_index_buffer_bytes(), ElementLayout::index(mesh))
            {
                self.allocate(
                    mesh_id,
                    index_buffer_data.len() as u64,
                    index_element_layout,
                    &mut slabs_to_grow,
                    mesh_allocator_settings,
                );
            }
        }

        // Perform growth.
        for (slab_id, slab_to_grow) in slabs_to_grow.0 {
            self.reallocate_slab(render_device, render_queue, slab_id, slab_to_grow);
        }

        // Copy new mesh data in.
        for (mesh_id, mesh) in &extracted_meshes.extracted {
            self.copy_mesh_vertex_data(mesh_id, mesh, render_device, render_queue);
            self.copy_mesh_index_data(mesh_id, mesh, render_device, render_queue);
        }
    }

    /// Copies vertex array data from a mesh into the appropriate spot in the
    /// slab.
    fn copy_mesh_vertex_data(
        &mut self,
        mesh_id: &AssetId<Mesh>,
        mesh: &Mesh,
        render_device: &RenderDevice,
        render_queue: &RenderQueue,
    ) {
        let Some(&slab_id) = self.mesh_id_to_vertex_slab.get(mesh_id) else {
            return;
        };

        // Call the generic function.
        self.copy_element_data(
            mesh_id,
            mesh.get_vertex_buffer_size(),
            |slice| mesh.write_packed_vertex_buffer_data(slice),
            BufferUsages::VERTEX,
            slab_id,
            render_device,
            render_queue,
        );
    }

    /// Copies index array data from a mesh into the appropriate spot in the
    /// slab.
    fn copy_mesh_index_data(
        &mut self,
        mesh_id: &AssetId<Mesh>,
        mesh: &Mesh,
        render_device: &RenderDevice,
        render_queue: &RenderQueue,
    ) {
        let Some(&slab_id) = self.mesh_id_to_index_slab.get(mesh_id) else {
            return;
        };
        let Some(index_data) = mesh.get_index_buffer_bytes() else {
            return;
        };

        // Call the generic function.
        self.copy_element_data(
            mesh_id,
            index_data.len(),
            |slice| slice.copy_from_slice(index_data),
            BufferUsages::INDEX,
            slab_id,
            render_device,
            render_queue,
        );
    }

    /// A generic function that copies either vertex or index data into a slab.
    fn copy_element_data(
        &mut self,
        mesh_id: &AssetId<Mesh>,
        len: usize,
        fill_data: impl Fn(&mut [u8]),
        buffer_usages: BufferUsages,
        slab_id: SlabId,
        render_device: &RenderDevice,
        render_queue: &RenderQueue,
    ) {
        let Some(slab) = self.slabs.get_mut(&slab_id) else {
            return;
        };

        match *slab {
            Slab::General(ref mut general_slab) => {
                let (Some(buffer), Some(allocated_range)) = (
                    &general_slab.buffer,
                    general_slab.pending_allocations.remove(mesh_id),
                ) else {
                    return;
                };

                let slot_size = general_slab.element_layout.slot_size();

                // Round up the size to a multiple of the slot size to satisfy wgpu alignment requirements.
                if let Some(size) = BufferSize::new((len as u64).next_multiple_of(slot_size)) {
                    // Write the data in.
                    if let Some(mut buffer) = render_queue.write_buffer_with(
                        buffer,
                        allocated_range.allocation.offset as u64 * slot_size,
                        size,
                    ) {
                        let slice = &mut buffer.as_mut()[..len];
                        fill_data(slice);
                    }
                }

                // Mark the allocation as resident.
                general_slab
                    .resident_allocations
                    .insert(*mesh_id, allocated_range);
            }

            Slab::LargeObject(ref mut large_object_slab) => {
                debug_assert!(large_object_slab.buffer.is_none());

                // Create the buffer and its data in one go.
                let buffer = render_device.create_buffer(&BufferDescriptor {
                    label: Some(&format!(
                        "large mesh slab {} ({}buffer)",
                        slab_id,
                        buffer_usages_to_str(buffer_usages)
                    )),
                    size: len as u64,
                    usage: buffer_usages | BufferUsages::COPY_DST | self.extra_buffer_usages,
                    mapped_at_creation: true,
                });
                {
                    let slice = &mut buffer.slice(..).get_mapped_range_mut()[..len];
                    fill_data(slice);
                }
                buffer.unmap();
                large_object_slab.buffer = Some(buffer);
            }
        }
    }

    /// Frees allocations for meshes that were removed or modified this frame.
    fn free_meshes(&mut self, extracted_meshes: &ExtractedAssets<RenderMesh>) {
        let mut empty_slabs = <HashSet<_>>::default();

        // TODO: Consider explicitly reusing allocations for changed meshes of the same size
        let meshes_to_free = extracted_meshes
            .removed
            .iter()
            .chain(extracted_meshes.modified.iter());

        for mesh_id in meshes_to_free {
            if let Some(slab_id) = self.mesh_id_to_vertex_slab.remove(mesh_id) {
                self.free_allocation_in_slab(mesh_id, slab_id, &mut empty_slabs);
            }
            if let Some(slab_id) = self.mesh_id_to_index_slab.remove(mesh_id) {
                self.free_allocation_in_slab(mesh_id, slab_id, &mut empty_slabs);
            }
        }

        for empty_slab in empty_slabs {
            self.slab_layouts.values_mut().for_each(|slab_ids| {
                let idx = slab_ids.iter().position(|&slab_id| slab_id == empty_slab);
                if let Some(idx) = idx {
                    slab_ids.remove(idx);
                }
            });
            self.slabs.remove(&empty_slab);
        }
    }

    /// Given a slab and the ID of a mesh containing data in it, marks the
    /// allocation as free.
    ///
    /// If this results in the slab becoming empty, this function adds the slab
    /// to the `empty_slabs` set.
    fn free_allocation_in_slab(
        &mut self,
        mesh_id: &AssetId<Mesh>,
        slab_id: SlabId,
        empty_slabs: &mut HashSet<SlabId>,
    ) {
        let Some(slab) = self.slabs.get_mut(&slab_id) else {
            return;
        };

        match *slab {
            Slab::General(ref mut general_slab) => {
                let Some(slab_allocation) = general_slab
                    .resident_allocations
                    .remove(mesh_id)
                    .or_else(|| general_slab.pending_allocations.remove(mesh_id))
                else {
                    return;
                };

                general_slab.allocator.free(slab_allocation.allocation);

                if general_slab.is_empty() {
                    empty_slabs.insert(slab_id);
                }
            }
            Slab::LargeObject(_) => {
                empty_slabs.insert(slab_id);
            }
        }
    }

    /// Allocates space for mesh data with the given byte size and layout in the
    /// appropriate slab, creating that slab if necessary.
    fn allocate(
        &mut self,
        mesh_id: &AssetId<Mesh>,
        data_byte_len: u64,
        layout: ElementLayout,
        slabs_to_grow: &mut SlabsToReallocate,
        settings: &MeshAllocatorSettings,
    ) {
        let data_element_count = data_byte_len.div_ceil(layout.size) as u32;
        let data_slot_count = data_element_count.div_ceil(layout.elements_per_slot);

        // If the mesh data is too large for a slab, give it a slab of its own.
        if data_slot_count as u64 * layout.slot_size()
            >= settings.large_threshold.min(settings.max_slab_size)
        {
            self.allocate_large(mesh_id, layout);
        } else {
            self.allocate_general(mesh_id, data_slot_count, layout, slabs_to_grow, settings);
        }
    }

    /// Allocates space for mesh data with the given slot size and layout in the
    /// appropriate general slab.
    fn allocate_general(
        &mut self,
        mesh_id: &AssetId<Mesh>,
        data_slot_count: u32,
        layout: ElementLayout,
        slabs_to_grow: &mut SlabsToReallocate,
        settings: &MeshAllocatorSettings,
    ) {
        let candidate_slabs = self.slab_layouts.entry(layout).or_default();

        // Loop through the slabs that accept elements of the appropriate type
        // and try to allocate the mesh inside them. We go with the first one
        // that succeeds.
        let mut mesh_allocation = None;
        for &slab_id in &*candidate_slabs {
            let Some(Slab::General(slab)) = self.slabs.get_mut(&slab_id) else {
                unreachable!("Slab not found")
            };

            let Some(allocation) = slab.allocator.allocate(data_slot_count) else {
                continue;
            };

            // Try to fit the object in the slab, growing if necessary.
            match slab.grow_if_necessary(allocation.offset + data_slot_count, settings) {
                SlabGrowthResult::NoGrowthNeeded => {}
                SlabGrowthResult::NeededGrowth(slab_to_reallocate) => {
                    // If we already grew the slab this frame, don't replace the
                    // `SlabToReallocate` entry. We want to keep the entry
                    // corresponding to the size that the slab had at the start
                    // of the frame, so that we can copy only the used portion
                    // of the initial buffer to the new one.
                    if let Entry::Vacant(vacant_entry) = slabs_to_grow.entry(slab_id) {
                        vacant_entry.insert(slab_to_reallocate);
                    }
                }
                SlabGrowthResult::CantGrow => continue,
            }

            mesh_allocation = Some(MeshAllocation {
                slab_id,
                slab_allocation: SlabAllocation {
                    allocation,
                    slot_count: data_slot_count,
                },
            });
            break;
        }

        // If we still have no allocation, make a new slab.
        if mesh_allocation.is_none() {
            let new_slab_id = self.next_slab_id;
            self.next_slab_id.0 = NonMaxU32::new(self.next_slab_id.0.get() + 1).unwrap_or_default();

            let new_slab = GeneralSlab::new(
                new_slab_id,
                &mut mesh_allocation,
                settings,
                layout,
                data_slot_count,
            );

            self.slabs.insert(new_slab_id, Slab::General(new_slab));
            candidate_slabs.push(new_slab_id);
            slabs_to_grow.insert(new_slab_id, SlabToReallocate::default());
        }

        let mesh_allocation = mesh_allocation.expect("Should have been able to allocate");

        // Mark the allocation as pending. Don't copy it in just yet; further
        // meshes loaded this frame may result in its final allocation location
        // changing.
        if let Some(Slab::General(general_slab)) = self.slabs.get_mut(&mesh_allocation.slab_id) {
            general_slab
                .pending_allocations
                .insert(*mesh_id, mesh_allocation.slab_allocation);
        };

        self.record_allocation(mesh_id, mesh_allocation.slab_id, layout.class);
    }

    /// Allocates an object into its own dedicated slab.
    fn allocate_large(&mut self, mesh_id: &AssetId<Mesh>, layout: ElementLayout) {
        let new_slab_id = self.next_slab_id;
        self.next_slab_id.0 = NonMaxU32::new(self.next_slab_id.0.get() + 1).unwrap_or_default();

        self.record_allocation(mesh_id, new_slab_id, layout.class);

        self.slabs.insert(
            new_slab_id,
            Slab::LargeObject(LargeObjectSlab {
                buffer: None,
                element_layout: layout,
            }),
        );
    }

    /// Reallocates a slab that needs to be resized, or allocates a new slab.
    ///
    /// This performs the actual growth operation that
    /// [`GeneralSlab::grow_if_necessary`] scheduled. We do the growth in two
    /// phases so that, if a slab grows multiple times in the same frame, only
    /// one new buffer is reallocated, rather than reallocating the buffer
    /// multiple times.
    fn reallocate_slab(
        &mut self,
        render_device: &RenderDevice,
        render_queue: &RenderQueue,
        slab_id: SlabId,
        slab_to_grow: SlabToReallocate,
    ) {
        let Some(Slab::General(slab)) = self.slabs.get_mut(&slab_id) else {
            error!("Couldn't find slab {} to grow", slab_id);
            return;
        };

        let old_buffer = slab.buffer.take();

        let mut buffer_usages = BufferUsages::COPY_SRC | BufferUsages::COPY_DST;
        match slab.element_layout.class {
            ElementClass::Vertex => buffer_usages |= BufferUsages::VERTEX,
            ElementClass::Index => buffer_usages |= BufferUsages::INDEX,
        };

        // Create the buffer.
        let new_buffer = render_device.create_buffer(&BufferDescriptor {
            label: Some(&format!(
                "general mesh slab {} ({}buffer)",
                slab_id,
                buffer_usages_to_str(buffer_usages)
            )),
            size: slab.current_slot_capacity as u64 * slab.element_layout.slot_size(),
            usage: buffer_usages | self.extra_buffer_usages,
            mapped_at_creation: false,
        });

        slab.buffer = Some(new_buffer.clone());

        let Some(old_buffer) = old_buffer else { return };

        // In order to do buffer copies, we need a command encoder.
        let mut encoder = render_device.create_command_encoder(&CommandEncoderDescriptor {
            label: Some("slab resize encoder"),
        });

        // Copy the data from the old buffer into the new one.
        encoder.copy_buffer_to_buffer(
            &old_buffer,
            0,
            &new_buffer,
            0,
            slab_to_grow.old_slot_capacity as u64 * slab.element_layout.slot_size(),
        );

        let command_buffer = encoder.finish();
        render_queue.submit([command_buffer]);
    }

    /// Records the location of the given newly-allocated mesh data in the
    /// [`Self::mesh_id_to_vertex_slab`] or [`Self::mesh_id_to_index_slab`]
    /// tables as appropriate.
    fn record_allocation(
        &mut self,
        mesh_id: &AssetId<Mesh>,
        slab_id: SlabId,
        element_class: ElementClass,
    ) {
        match element_class {
            ElementClass::Vertex => {
                self.mesh_id_to_vertex_slab.insert(*mesh_id, slab_id);
            }
            ElementClass::Index => {
                self.mesh_id_to_index_slab.insert(*mesh_id, slab_id);
            }
        }
    }
}

impl GeneralSlab {
    /// Creates a new growable slab big enough to hold a single allocation of
    /// `data_slot_count` slots with the given `layout`.
    fn new(
        new_slab_id: SlabId,
        mesh_allocation: &mut Option<MeshAllocation>,
        settings: &MeshAllocatorSettings,
        layout: ElementLayout,
        data_slot_count: u32,
    ) -> GeneralSlab {
        let initial_slab_slot_capacity = (settings.min_slab_size.div_ceil(layout.slot_size())
            as u32)
            .max(offset_allocator::ext::min_allocator_size(data_slot_count));
        let max_slab_slot_capacity = (settings.max_slab_size.div_ceil(layout.slot_size()) as u32)
            .max(offset_allocator::ext::min_allocator_size(data_slot_count));

        let mut new_slab = GeneralSlab {
            allocator: Allocator::new(max_slab_slot_capacity),
            buffer: None,
            resident_allocations: HashMap::default(),
            pending_allocations: HashMap::default(),
            element_layout: layout,
            current_slot_capacity: initial_slab_slot_capacity,
        };

        // This should never fail.
        if let Some(allocation) = new_slab.allocator.allocate(data_slot_count) {
            *mesh_allocation = Some(MeshAllocation {
                slab_id: new_slab_id,
                slab_allocation: SlabAllocation {
                    slot_count: data_slot_count,
                    allocation,
                },
            });
        }

        new_slab
    }

    /// Checks to see if the size of this slab is at least `new_size_in_slots`
    /// and grows the slab if it isn't.
    ///
    /// The returned [`SlabGrowthResult`] describes whether the slab needed to
    /// grow and whether, if so, it was successful in doing so.
    fn grow_if_necessary(
        &mut self,
        new_size_in_slots: u32,
        settings: &MeshAllocatorSettings,
    ) -> SlabGrowthResult {
        // Is the slab big enough already?
        let initial_slot_capacity = self.current_slot_capacity;
        if self.current_slot_capacity >= new_size_in_slots {
            return SlabGrowthResult::NoGrowthNeeded;
        }

        // Try to grow in increments of `MeshAllocatorSettings::growth_factor`
        // until we're big enough.
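        // For example, with the default growth factor of 1.5, a slab with 1000
        // slots would step to 1500, then 2250, and so on, capped at the number
        // of slots that fit in `max_slab_size`.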
        while self.current_slot_capacity < new_size_in_slots {
            let new_slab_slot_capacity =
                ((self.current_slot_capacity as f64 * settings.growth_factor).ceil() as u32)
                    .min((settings.max_slab_size / self.element_layout.slot_size()) as u32);
            if new_slab_slot_capacity == self.current_slot_capacity {
                // The slab is full.
                return SlabGrowthResult::CantGrow;
            }

            self.current_slot_capacity = new_slab_slot_capacity;
        }

        // Tell our caller what we did.
        SlabGrowthResult::NeededGrowth(SlabToReallocate {
            old_slot_capacity: initial_slot_capacity,
        })
    }
}

impl ElementLayout {
    /// Creates an [`ElementLayout`] for mesh data of the given class (vertex or
    /// index) with the given byte size.
    fn new(class: ElementClass, size: u64) -> ElementLayout {
        const {
            assert!(4 == COPY_BUFFER_ALIGNMENT);
        }
        // this is equivalent to `4 / gcd(4,size)` but lets us not implement gcd.
        // ping @atlv if above assert ever fails (likely never)
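        // The table reads: a size that's a multiple of 4 needs 1 element per
        // slot, an odd size needs 4, and a size that's 2 mod 4 needs 2, so
        // that `size * elements_per_slot` is always a multiple of 4.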
        let elements_per_slot = [1, 4, 2, 4][size as usize & 3];
        ElementLayout {
            class,
            size,
            // Make sure that slot boundaries begin and end on
            // `COPY_BUFFER_ALIGNMENT`-byte (4-byte) boundaries.
            elements_per_slot,
        }
    }

    fn slot_size(&self) -> u64 {
        self.size * self.elements_per_slot as u64
    }

    /// Creates the appropriate [`ElementLayout`] for the given mesh's vertex
    /// data.
    fn vertex(
        mesh_vertex_buffer_layouts: &mut MeshVertexBufferLayouts,
        mesh: &Mesh,
    ) -> ElementLayout {
        let mesh_vertex_buffer_layout =
            mesh.get_mesh_vertex_buffer_layout(mesh_vertex_buffer_layouts);
        ElementLayout::new(
            ElementClass::Vertex,
            mesh_vertex_buffer_layout.0.layout().array_stride,
        )
    }

    /// Creates the appropriate [`ElementLayout`] for the given mesh's index
    /// data.
    fn index(mesh: &Mesh) -> Option<ElementLayout> {
        let size = match mesh.indices()? {
            Indices::U16(_) => 2,
            Indices::U32(_) => 4,
        };
        Some(ElementLayout::new(ElementClass::Index, size))
    }
}

impl GeneralSlab {
    /// Returns true if this slab is empty.
    fn is_empty(&self) -> bool {
        self.resident_allocations.is_empty() && self.pending_allocations.is_empty()
    }
}

/// Returns a string describing the given buffer usages.
fn buffer_usages_to_str(buffer_usages: BufferUsages) -> &'static str {
    if buffer_usages.contains(BufferUsages::VERTEX) {
        "vertex "
    } else if buffer_usages.contains(BufferUsages::INDEX) {
        "index "
    } else {
        ""
    }
}