use alloc::vec::Vec;
use core::{
    fmt::{self, Display, Formatter},
    ops::Range,
};
use nonmax::NonMaxU32;

use bevy_app::{App, Plugin};
use bevy_asset::AssetId;
use bevy_derive::{Deref, DerefMut};
use bevy_ecs::{
    resource::Resource,
    schedule::IntoScheduleConfigs as _,
    system::{Res, ResMut},
    world::{FromWorld, World},
};
use bevy_platform::collections::{hash_map::Entry, HashMap, HashSet};
use bevy_utils::default;
use offset_allocator::{Allocation, Allocator};
use tracing::error;
use wgpu::{
    BufferDescriptor, BufferSize, BufferUsages, CommandEncoderDescriptor, DownlevelFlags,
    COPY_BUFFER_ALIGNMENT,
};

use crate::{
    mesh::{Indices, Mesh, MeshVertexBufferLayouts, RenderMesh},
    render_asset::{prepare_assets, ExtractedAssets},
    render_resource::Buffer,
    renderer::{RenderAdapter, RenderDevice, RenderQueue},
    Render, RenderApp, RenderSet,
};

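/// The plugin that registers the [`MeshAllocator`] resource and the render-world
/// system that allocates and frees GPU memory for mesh vertex and index data.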
pub struct MeshAllocatorPlugin;

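/// Manages the GPU buffers that hold mesh vertex and index data.
///
/// Small meshes are packed together into shared "slab" buffers managed by an
/// [`offset_allocator::Allocator`]; meshes whose data exceeds
/// [`MeshAllocatorSettings::large_threshold`] each get a dedicated buffer.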
#[derive(Resource)]
pub struct MeshAllocator {
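    /// All slabs, keyed by their IDs.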
    slabs: HashMap<SlabId, Slab>,

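    /// The IDs of all general slabs that hold elements of the given layout, used
    /// to find candidate slabs when allocating.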
    slab_layouts: HashMap<ElementLayout, Vec<SlabId>>,

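    /// Maps mesh asset IDs to the slab holding their vertex data.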
    mesh_id_to_vertex_slab: HashMap<AssetId<Mesh>, SlabId>,

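    /// Maps mesh asset IDs to the slab holding their index data.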
    mesh_id_to_index_slab: HashMap<AssetId<Mesh>, SlabId>,

    next_slab_id: SlabId,

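    /// Whether vertex data from multiple meshes can share a single buffer.
    ///
    /// This requires the adapter to support drawing with a nonzero base vertex
    /// (`DownlevelFlags::BASE_VERTEX`); without it, every mesh receives its own
    /// vertex buffer.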
    general_vertex_slabs_supported: bool,
}

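/// Tuning parameters that control how the [`MeshAllocator`] sizes and grows its
/// slab buffers.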
#[derive(Resource)]
pub struct MeshAllocatorSettings {
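    /// The smallest size of a general slab buffer, in bytes; new slabs start at
    /// this size. Defaults to 1 MiB.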
    pub min_slab_size: u64,

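    /// The maximum size a general slab buffer may grow to, in bytes. Defaults to
    /// 512 MiB.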
    pub max_slab_size: u64,

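    /// Meshes whose data occupies at least this many bytes are placed in
    /// dedicated buffers of their own instead of a shared slab. Defaults to
    /// 256 MiB.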
    pub large_threshold: u64,

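    /// The factor by which a full slab's capacity is multiplied when it needs to
    /// grow. Defaults to 1.5.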
    pub growth_factor: f64,
}

impl Default for MeshAllocatorSettings {
    fn default() -> Self {
        Self {
            min_slab_size: 1024 * 1024,
            max_slab_size: 1024 * 1024 * 512,
            large_threshold: 1024 * 1024 * 256,
            growth_factor: 1.5,
        }
    }
}

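/// The buffer and the range within it that hold a single mesh's vertex or index
/// data, as returned by [`MeshAllocator::mesh_vertex_slice`] and
/// [`MeshAllocator::mesh_index_slice`].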
pub struct MeshBufferSlice<'a> {
    pub buffer: &'a Buffer,

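    /// The portion of the buffer that the mesh occupies, measured in elements
    /// (vertices or indices), not in bytes.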
    pub range: Range<u32>,
}

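/// The unique identifier of a slab.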
#[derive(Clone, Copy, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
#[repr(transparent)]
pub struct SlabId(pub NonMaxU32);

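/// A single slab: either a general slab shared by multiple meshes or a dedicated
/// large-object slab holding one mesh.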
enum Slab {
    General(GeneralSlab),
    LargeObject(LargeObjectSlab),
}

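/// A slab that can hold multiple allocations, managed by an offset allocator.
///
/// Allocations start out in `pending_allocations` and move to
/// `resident_allocations` once their data has been written into the buffer.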
struct GeneralSlab {
    allocator: Allocator,

    buffer: Option<Buffer>,

    resident_allocations: HashMap<AssetId<Mesh>, SlabAllocation>,

    pending_allocations: HashMap<AssetId<Mesh>, SlabAllocation>,

    element_layout: ElementLayout,

    current_slot_capacity: u32,
}

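/// A slab that holds a single mesh in a dedicated buffer.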
struct LargeObjectSlab {
    buffer: Option<Buffer>,

    element_layout: ElementLayout,
}

#[derive(Clone, Copy, PartialEq, Eq, Hash)]
enum ElementClass {
    Vertex,
    Index,
}

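/// The result of [`GeneralSlab::grow_if_necessary`]: whether the slab already had
/// enough capacity, needs its buffer reallocated at a larger size, or cannot grow
/// any further.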
enum SlabGrowthResult {
    NoGrowthNeeded,
    NeededGrowth(SlabToReallocate),
    CantGrow,
}

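/// Describes the elements (vertices or indices) stored in a slab: their class,
/// their size in bytes, and how many of them are grouped into each allocation
/// slot so that slots stay aligned to `COPY_BUFFER_ALIGNMENT`.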
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
struct ElementLayout {
    class: ElementClass,

    size: u64,

    elements_per_slot: u32,
}

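/// The slab and intra-slab allocation assigned to a single mesh.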
struct MeshAllocation {
    slab_id: SlabId,
    slab_allocation: SlabAllocation,
}

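/// A single allocation within a general slab, measured in slots.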
#[derive(Clone)]
struct SlabAllocation {
    allocation: Allocation,
    slot_count: u32,
}

#[derive(Default, Deref, DerefMut)]
struct SlabsToReallocate(HashMap<SlabId, SlabToReallocate>);

#[derive(Default)]
struct SlabToReallocate {
    old_slot_capacity: u32,
}

impl Display for SlabId {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        self.0.fmt(f)
    }
}

impl Plugin for MeshAllocatorPlugin {
    fn build(&self, app: &mut App) {
        let Some(render_app) = app.get_sub_app_mut(RenderApp) else {
            return;
        };

        render_app
            .init_resource::<MeshAllocatorSettings>()
            .add_systems(
                Render,
                allocate_and_free_meshes
                    .in_set(RenderSet::PrepareAssets)
                    .before(prepare_assets::<RenderMesh>),
            );
    }

    fn finish(&self, app: &mut App) {
        let Some(render_app) = app.get_sub_app_mut(RenderApp) else {
            return;
        };

        render_app.init_resource::<MeshAllocator>();
    }
}

impl FromWorld for MeshAllocator {
    fn from_world(world: &mut World) -> Self {
        let render_adapter = world.resource::<RenderAdapter>();
        let general_vertex_slabs_supported = render_adapter
            .get_downlevel_capabilities()
            .flags
            .contains(DownlevelFlags::BASE_VERTEX);

        Self {
            slabs: HashMap::default(),
            slab_layouts: HashMap::default(),
            mesh_id_to_vertex_slab: HashMap::default(),
            mesh_id_to_index_slab: HashMap::default(),
            next_slab_id: default(),
            general_vertex_slabs_supported,
        }
    }
}

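/// A system that frees the allocations of meshes that were removed or modified
/// this frame and then allocates buffer space and uploads data for the meshes
/// extracted this frame.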
pub fn allocate_and_free_meshes(
    mut mesh_allocator: ResMut<MeshAllocator>,
    mesh_allocator_settings: Res<MeshAllocatorSettings>,
    extracted_meshes: Res<ExtractedAssets<RenderMesh>>,
    mut mesh_vertex_buffer_layouts: ResMut<MeshVertexBufferLayouts>,
    render_device: Res<RenderDevice>,
    render_queue: Res<RenderQueue>,
) {
    mesh_allocator.free_meshes(&extracted_meshes);

    mesh_allocator.allocate_meshes(
        &mesh_allocator_settings,
        &extracted_meshes,
        &mut mesh_vertex_buffer_layouts,
        &render_device,
        &render_queue,
    );
}

impl MeshAllocator {
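    /// Returns the buffer and range within it that contain the vertex data of
    /// the mesh with the given ID, if it has been allocated.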
    pub fn mesh_vertex_slice(&self, mesh_id: &AssetId<Mesh>) -> Option<MeshBufferSlice> {
        self.mesh_slice_in_slab(mesh_id, *self.mesh_id_to_vertex_slab.get(mesh_id)?)
    }

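    /// Returns the buffer and range within it that contain the index data of
    /// the mesh with the given ID, if it has been allocated.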
    pub fn mesh_index_slice(&self, mesh_id: &AssetId<Mesh>) -> Option<MeshBufferSlice> {
        self.mesh_slice_in_slab(mesh_id, *self.mesh_id_to_index_slab.get(mesh_id)?)
    }

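    /// Returns the IDs of the vertex slab and index slab (in that order) that
    /// hold the given mesh's data, if any.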
    pub fn mesh_slabs(&self, mesh_id: &AssetId<Mesh>) -> (Option<SlabId>, Option<SlabId>) {
        (
            self.mesh_id_to_vertex_slab.get(mesh_id).cloned(),
            self.mesh_id_to_index_slab.get(mesh_id).cloned(),
        )
    }

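    /// Looks up a mesh's allocation within the given slab and converts it into a
    /// [`MeshBufferSlice`], with the range expressed in elements.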
    fn mesh_slice_in_slab(
        &self,
        mesh_id: &AssetId<Mesh>,
        slab_id: SlabId,
    ) -> Option<MeshBufferSlice> {
        match self.slabs.get(&slab_id)? {
            Slab::General(general_slab) => {
                let slab_allocation = general_slab.resident_allocations.get(mesh_id)?;
                Some(MeshBufferSlice {
                    buffer: general_slab.buffer.as_ref()?,
                    range: (slab_allocation.allocation.offset
                        * general_slab.element_layout.elements_per_slot)
                        ..((slab_allocation.allocation.offset + slab_allocation.slot_count)
                            * general_slab.element_layout.elements_per_slot),
                })
            }

            Slab::LargeObject(large_object_slab) => {
                let buffer = large_object_slab.buffer.as_ref()?;
                Some(MeshBufferSlice {
                    buffer,
                    range: 0..((buffer.size() / large_object_slab.element_layout.size) as u32),
                })
            }
        }
    }

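    /// Allocates space for every mesh extracted this frame, reallocates any
    /// general slabs that need to grow, and then copies the mesh data into the
    /// slab buffers.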
    fn allocate_meshes(
        &mut self,
        mesh_allocator_settings: &MeshAllocatorSettings,
        extracted_meshes: &ExtractedAssets<RenderMesh>,
        mesh_vertex_buffer_layouts: &mut MeshVertexBufferLayouts,
        render_device: &RenderDevice,
        render_queue: &RenderQueue,
    ) {
        let mut slabs_to_grow = SlabsToReallocate::default();

        for (mesh_id, mesh) in &extracted_meshes.extracted {
            let vertex_element_layout = ElementLayout::vertex(mesh_vertex_buffer_layouts, mesh);
            if self.general_vertex_slabs_supported {
                self.allocate(
                    mesh_id,
                    mesh.get_vertex_buffer_size() as u64,
                    vertex_element_layout,
                    &mut slabs_to_grow,
                    mesh_allocator_settings,
                );
            } else {
                self.allocate_large(mesh_id, vertex_element_layout);
            }

            if let (Some(index_buffer_data), Some(index_element_layout)) =
                (mesh.get_index_buffer_bytes(), ElementLayout::index(mesh))
            {
                self.allocate(
                    mesh_id,
                    index_buffer_data.len() as u64,
                    index_element_layout,
                    &mut slabs_to_grow,
                    mesh_allocator_settings,
                );
            }
        }

        for (slab_id, slab_to_grow) in slabs_to_grow.0 {
            self.reallocate_slab(render_device, render_queue, slab_id, slab_to_grow);
        }

        for (mesh_id, mesh) in &extracted_meshes.extracted {
            self.copy_mesh_vertex_data(mesh_id, mesh, render_device, render_queue);
            self.copy_mesh_index_data(mesh_id, mesh, render_device, render_queue);
        }
    }

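    /// Copies a mesh's vertex data into the buffer of the slab it was allocated in.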
    fn copy_mesh_vertex_data(
        &mut self,
        mesh_id: &AssetId<Mesh>,
        mesh: &Mesh,
        render_device: &RenderDevice,
        render_queue: &RenderQueue,
    ) {
        let Some(&slab_id) = self.mesh_id_to_vertex_slab.get(mesh_id) else {
            return;
        };

        self.copy_element_data(
            mesh_id,
            mesh.get_vertex_buffer_size(),
            |slice| mesh.write_packed_vertex_buffer_data(slice),
            BufferUsages::VERTEX,
            slab_id,
            render_device,
            render_queue,
        );
    }

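    /// Copies a mesh's index data into the buffer of the slab it was allocated in.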
    fn copy_mesh_index_data(
        &mut self,
        mesh_id: &AssetId<Mesh>,
        mesh: &Mesh,
        render_device: &RenderDevice,
        render_queue: &RenderQueue,
    ) {
        let Some(&slab_id) = self.mesh_id_to_index_slab.get(mesh_id) else {
            return;
        };
        let Some(index_data) = mesh.get_index_buffer_bytes() else {
            return;
        };

        self.copy_element_data(
            mesh_id,
            index_data.len(),
            |slice| slice.copy_from_slice(index_data),
            BufferUsages::INDEX,
            slab_id,
            render_device,
            render_queue,
        );
    }

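    /// Writes mesh data into the given slab's buffer.
    ///
    /// For a general slab, the data is written at the mesh's pending allocation
    /// offset via `write_buffer_with`, and the allocation then becomes resident.
    /// For a large-object slab, a dedicated buffer is created mapped at creation,
    /// filled, and stored in the slab.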
    fn copy_element_data(
        &mut self,
        mesh_id: &AssetId<Mesh>,
        len: usize,
        fill_data: impl Fn(&mut [u8]),
        buffer_usages: BufferUsages,
        slab_id: SlabId,
        render_device: &RenderDevice,
        render_queue: &RenderQueue,
    ) {
        let Some(slab) = self.slabs.get_mut(&slab_id) else {
            return;
        };

        match *slab {
            Slab::General(ref mut general_slab) => {
                let (Some(buffer), Some(allocated_range)) = (
                    &general_slab.buffer,
                    general_slab.pending_allocations.remove(mesh_id),
                ) else {
                    return;
                };

                let slot_size = general_slab.element_layout.slot_size();

                if let Some(size) = BufferSize::new((len as u64).next_multiple_of(slot_size)) {
                    if let Some(mut buffer) = render_queue.write_buffer_with(
                        buffer,
                        allocated_range.allocation.offset as u64 * slot_size,
                        size,
                    ) {
                        let slice = &mut buffer.as_mut()[..len];
                        fill_data(slice);
                    }
                }

                general_slab
                    .resident_allocations
                    .insert(*mesh_id, allocated_range);
            }

            Slab::LargeObject(ref mut large_object_slab) => {
                debug_assert!(large_object_slab.buffer.is_none());

                let buffer = render_device.create_buffer(&BufferDescriptor {
                    label: Some(&format!(
                        "large mesh slab {} ({}buffer)",
                        slab_id,
                        buffer_usages_to_str(buffer_usages)
                    )),
                    size: len as u64,
                    usage: buffer_usages | BufferUsages::COPY_DST,
                    mapped_at_creation: true,
                });
                {
                    let slice = &mut buffer.slice(..).get_mapped_range_mut()[..len];
                    fill_data(slice);
                }
                buffer.unmap();
                large_object_slab.buffer = Some(buffer);
            }
        }
    }

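    /// Frees the allocations of all meshes that were removed or modified this
    /// frame and drops any slabs that become empty as a result.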
    fn free_meshes(&mut self, extracted_meshes: &ExtractedAssets<RenderMesh>) {
        let mut empty_slabs = <HashSet<_>>::default();

        let meshes_to_free = extracted_meshes
            .removed
            .iter()
            .chain(extracted_meshes.modified.iter());

        for mesh_id in meshes_to_free {
            if let Some(slab_id) = self.mesh_id_to_vertex_slab.remove(mesh_id) {
                self.free_allocation_in_slab(mesh_id, slab_id, &mut empty_slabs);
            }
            if let Some(slab_id) = self.mesh_id_to_index_slab.remove(mesh_id) {
                self.free_allocation_in_slab(mesh_id, slab_id, &mut empty_slabs);
            }
        }

        for empty_slab in empty_slabs {
            self.slab_layouts.values_mut().for_each(|slab_ids| {
                let idx = slab_ids.iter().position(|&slab_id| slab_id == empty_slab);
                if let Some(idx) = idx {
                    slab_ids.remove(idx);
                }
            });
            self.slabs.remove(&empty_slab);
        }
    }

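    /// Removes the given mesh's allocation from the given slab, marking the slab
    /// as empty if it no longer contains any allocations.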
    fn free_allocation_in_slab(
        &mut self,
        mesh_id: &AssetId<Mesh>,
        slab_id: SlabId,
        empty_slabs: &mut HashSet<SlabId>,
    ) {
        let Some(slab) = self.slabs.get_mut(&slab_id) else {
            return;
        };

        match *slab {
            Slab::General(ref mut general_slab) => {
                let Some(slab_allocation) = general_slab
                    .resident_allocations
                    .remove(mesh_id)
                    .or_else(|| general_slab.pending_allocations.remove(mesh_id))
                else {
                    return;
                };

                general_slab.allocator.free(slab_allocation.allocation);

                if general_slab.is_empty() {
                    empty_slabs.insert(slab_id);
                }
            }
            Slab::LargeObject(_) => {
                empty_slabs.insert(slab_id);
            }
        }
    }

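    /// Allocates space for one mesh's vertex or index data, routing it either to
    /// a shared general slab or, if it exceeds the large-object threshold, to a
    /// dedicated slab of its own.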
    fn allocate(
        &mut self,
        mesh_id: &AssetId<Mesh>,
        data_byte_len: u64,
        layout: ElementLayout,
        slabs_to_grow: &mut SlabsToReallocate,
        settings: &MeshAllocatorSettings,
    ) {
        let data_element_count = data_byte_len.div_ceil(layout.size) as u32;
        let data_slot_count = data_element_count.div_ceil(layout.elements_per_slot);

        if data_slot_count as u64 * layout.slot_size()
            >= settings.large_threshold.min(settings.max_slab_size)
        {
            self.allocate_large(mesh_id, layout);
        } else {
            self.allocate_general(mesh_id, data_slot_count, layout, slabs_to_grow, settings);
        }
    }

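    /// Allocates a mesh into a general slab with a compatible element layout,
    /// creating a new slab if no existing one can fit it. Slabs whose buffers
    /// must grow or be created are recorded in `slabs_to_grow`.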
    fn allocate_general(
        &mut self,
        mesh_id: &AssetId<Mesh>,
        data_slot_count: u32,
        layout: ElementLayout,
        slabs_to_grow: &mut SlabsToReallocate,
        settings: &MeshAllocatorSettings,
    ) {
        let candidate_slabs = self.slab_layouts.entry(layout).or_default();

        let mut mesh_allocation = None;
        for &slab_id in &*candidate_slabs {
            let Some(Slab::General(slab)) = self.slabs.get_mut(&slab_id) else {
                unreachable!("Slab not found")
            };

            let Some(allocation) = slab.allocator.allocate(data_slot_count) else {
                continue;
            };

            match slab.grow_if_necessary(allocation.offset + data_slot_count, settings) {
                SlabGrowthResult::NoGrowthNeeded => {}
                SlabGrowthResult::NeededGrowth(slab_to_reallocate) => {
                    if let Entry::Vacant(vacant_entry) = slabs_to_grow.entry(slab_id) {
                        vacant_entry.insert(slab_to_reallocate);
                    }
                }
                SlabGrowthResult::CantGrow => continue,
            }

            mesh_allocation = Some(MeshAllocation {
                slab_id,
                slab_allocation: SlabAllocation {
                    allocation,
                    slot_count: data_slot_count,
                },
            });
            break;
        }

        if mesh_allocation.is_none() {
            let new_slab_id = self.next_slab_id;
            self.next_slab_id.0 = NonMaxU32::new(self.next_slab_id.0.get() + 1).unwrap_or_default();

            let new_slab = GeneralSlab::new(
                new_slab_id,
                &mut mesh_allocation,
                settings,
                layout,
                data_slot_count,
            );

            self.slabs.insert(new_slab_id, Slab::General(new_slab));
            candidate_slabs.push(new_slab_id);
            slabs_to_grow.insert(new_slab_id, SlabToReallocate::default());
        }

        let mesh_allocation = mesh_allocation.expect("Should have been able to allocate");

        if let Some(Slab::General(general_slab)) = self.slabs.get_mut(&mesh_allocation.slab_id) {
            general_slab
                .pending_allocations
                .insert(*mesh_id, mesh_allocation.slab_allocation);
        };

        self.record_allocation(mesh_id, mesh_allocation.slab_id, layout.class);
    }

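    /// Allocates a mesh into its own dedicated slab. The buffer itself is created
    /// later, when the mesh data is copied in.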
    fn allocate_large(&mut self, mesh_id: &AssetId<Mesh>, layout: ElementLayout) {
        let new_slab_id = self.next_slab_id;
        self.next_slab_id.0 = NonMaxU32::new(self.next_slab_id.0.get() + 1).unwrap_or_default();

        self.record_allocation(mesh_id, new_slab_id, layout.class);

        self.slabs.insert(
            new_slab_id,
            Slab::LargeObject(LargeObjectSlab {
                buffer: None,
                element_layout: layout,
            }),
        );
    }

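    /// Creates a new buffer for a slab at its current (grown) slot capacity and,
    /// if the slab already had a buffer, copies the old contents into the new
    /// buffer on the GPU.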
799 fn reallocate_slab(
        &mut self,
        render_device: &RenderDevice,
        render_queue: &RenderQueue,
        slab_id: SlabId,
        slab_to_grow: SlabToReallocate,
    ) {
        let Some(Slab::General(slab)) = self.slabs.get_mut(&slab_id) else {
            error!("Couldn't find slab {} to grow", slab_id);
            return;
        };

        let old_buffer = slab.buffer.take();

        let mut buffer_usages = BufferUsages::COPY_SRC | BufferUsages::COPY_DST;
        match slab.element_layout.class {
            ElementClass::Vertex => buffer_usages |= BufferUsages::VERTEX,
            ElementClass::Index => buffer_usages |= BufferUsages::INDEX,
        };

        let new_buffer = render_device.create_buffer(&BufferDescriptor {
            label: Some(&format!(
                "general mesh slab {} ({}buffer)",
                slab_id,
                buffer_usages_to_str(buffer_usages)
            )),
            size: slab.current_slot_capacity as u64 * slab.element_layout.slot_size(),
            usage: buffer_usages,
            mapped_at_creation: false,
        });

        slab.buffer = Some(new_buffer.clone());

        let Some(old_buffer) = old_buffer else { return };

        let mut encoder = render_device.create_command_encoder(&CommandEncoderDescriptor {
            label: Some("slab resize encoder"),
        });

        encoder.copy_buffer_to_buffer(
            &old_buffer,
            0,
            &new_buffer,
            0,
            slab_to_grow.old_slot_capacity as u64 * slab.element_layout.slot_size(),
        );

        let command_buffer = encoder.finish();
        render_queue.submit([command_buffer]);
    }

    fn record_allocation(
        &mut self,
        mesh_id: &AssetId<Mesh>,
        slab_id: SlabId,
        element_class: ElementClass,
    ) {
        match element_class {
            ElementClass::Vertex => {
                self.mesh_id_to_vertex_slab.insert(*mesh_id, slab_id);
            }
            ElementClass::Index => {
                self.mesh_id_to_index_slab.insert(*mesh_id, slab_id);
            }
        }
    }
}

impl GeneralSlab {
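    /// Creates a new general slab sized to hold `data_slot_count` slots of the
    /// given layout and immediately tries to allocate the mesh into it, writing
    /// the result into `mesh_allocation`.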
    fn new(
        new_slab_id: SlabId,
        mesh_allocation: &mut Option<MeshAllocation>,
        settings: &MeshAllocatorSettings,
        layout: ElementLayout,
        data_slot_count: u32,
    ) -> GeneralSlab {
        let initial_slab_slot_capacity = (settings.min_slab_size.div_ceil(layout.slot_size())
            as u32)
            .max(offset_allocator::ext::min_allocator_size(data_slot_count));
        let max_slab_slot_capacity = (settings.max_slab_size.div_ceil(layout.slot_size()) as u32)
            .max(offset_allocator::ext::min_allocator_size(data_slot_count));

        let mut new_slab = GeneralSlab {
            allocator: Allocator::new(max_slab_slot_capacity),
            buffer: None,
            resident_allocations: HashMap::default(),
            pending_allocations: HashMap::default(),
            element_layout: layout,
            current_slot_capacity: initial_slab_slot_capacity,
        };

        if let Some(allocation) = new_slab.allocator.allocate(data_slot_count) {
            *mesh_allocation = Some(MeshAllocation {
                slab_id: new_slab_id,
                slab_allocation: SlabAllocation {
                    slot_count: data_slot_count,
                    allocation,
                },
            });
        }

        new_slab
    }

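    /// Checks whether the slab can hold `new_size_in_slots` slots and, if not,
    /// repeatedly multiplies its capacity by the growth factor, capped at the
    /// maximum slab size. Returns whether the buffer needs reallocating, along
    /// with the old capacity so the existing contents can be copied over.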
    fn grow_if_necessary(
        &mut self,
        new_size_in_slots: u32,
        settings: &MeshAllocatorSettings,
    ) -> SlabGrowthResult {
        let initial_slot_capacity = self.current_slot_capacity;
        if self.current_slot_capacity >= new_size_in_slots {
            return SlabGrowthResult::NoGrowthNeeded;
        }

        while self.current_slot_capacity < new_size_in_slots {
            let new_slab_slot_capacity =
                ((self.current_slot_capacity as f64 * settings.growth_factor).ceil() as u32)
                    .min((settings.max_slab_size / self.element_layout.slot_size()) as u32);
            if new_slab_slot_capacity == self.current_slot_capacity {
                return SlabGrowthResult::CantGrow;
            }

            self.current_slot_capacity = new_slab_slot_capacity;
        }

        SlabGrowthResult::NeededGrowth(SlabToReallocate {
            old_slot_capacity: initial_slot_capacity,
        })
    }
}

impl ElementLayout {
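    /// Creates an [`ElementLayout`] for the given class and element size,
    /// choosing `elements_per_slot` so that every allocation slot is a multiple
    /// of `COPY_BUFFER_ALIGNMENT` (4 bytes): sizes divisible by 4 use 1 element
    /// per slot, other even sizes use 2, and odd sizes use 4.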
    fn new(class: ElementClass, size: u64) -> ElementLayout {
        const {
            assert!(4 == COPY_BUFFER_ALIGNMENT);
        }
        let elements_per_slot = [1, 4, 2, 4][size as usize & 3];
        ElementLayout {
            class,
            size,
            elements_per_slot,
        }
    }

    fn slot_size(&self) -> u64 {
        self.size * self.elements_per_slot as u64
    }

    fn vertex(
        mesh_vertex_buffer_layouts: &mut MeshVertexBufferLayouts,
        mesh: &Mesh,
    ) -> ElementLayout {
        let mesh_vertex_buffer_layout =
            mesh.get_mesh_vertex_buffer_layout(mesh_vertex_buffer_layouts);
        ElementLayout::new(
            ElementClass::Vertex,
            mesh_vertex_buffer_layout.0.layout().array_stride,
        )
    }

    fn index(mesh: &Mesh) -> Option<ElementLayout> {
        let size = match mesh.indices()? {
            Indices::U16(_) => 2,
            Indices::U32(_) => 4,
        };
        Some(ElementLayout::new(ElementClass::Index, size))
    }
}

impl GeneralSlab {
    fn is_empty(&self) -> bool {
        self.resident_allocations.is_empty() && self.pending_allocations.is_empty()
    }
}

fn buffer_usages_to_str(buffer_usages: BufferUsages) -> &'static str {
    if buffer_usages.contains(BufferUsages::VERTEX) {
        "vertex "
    } else if buffer_usages.contains(BufferUsages::INDEX) {
        "index "
    } else {
        ""
    }
}