// bevy_render/batching/no_gpu_preprocessing.rs

//! Batching functionality when GPU preprocessing isn't in use.

use bevy_derive::{Deref, DerefMut};
use bevy_ecs::entity::Entity;
use bevy_ecs::resource::Resource;
use bevy_ecs::system::{Res, ResMut, StaticSystemParam};
use smallvec::{smallvec, SmallVec};
use tracing::error;
use wgpu::BindingResource;

use crate::{
    render_phase::{
        BinnedPhaseItem, BinnedRenderPhaseBatch, BinnedRenderPhaseBatchSets,
        CachedRenderPipelinePhaseItem, PhaseItemExtraIndex, SortedPhaseItem,
        ViewBinnedRenderPhases, ViewSortedRenderPhases,
    },
    render_resource::{GpuArrayBuffer, GpuArrayBufferable},
    renderer::{RenderDevice, RenderQueue},
};

use super::{GetBatchData, GetFullBatchData};
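
/// The CPU-built buffer of per-instance data (`BD`) that is filled anew each
/// frame and uploaded to the GPU when GPU preprocessing isn't in use.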
#[derive(Resource, Deref, DerefMut)]
pub struct BatchedInstanceBuffer<BD>(pub GpuArrayBuffer<BD>)
where
    BD: GpuArrayBufferable + Sync + Send + 'static;

impl<BD> BatchedInstanceBuffer<BD>
where
    BD: GpuArrayBufferable + Sync + Send + 'static,
{
    /// Creates a new, empty buffer.
    pub fn new(render_device: &RenderDevice) -> Self {
        BatchedInstanceBuffer(GpuArrayBuffer::new(render_device))
    }

    /// Returns the binding of the buffer that contains the per-instance data,
    /// if the underlying GPU buffer has been allocated.
    pub fn instance_data_binding(&self) -> Option<BindingResource> {
        self.binding()
    }
}
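
// A minimal usage sketch (not part of this module): create the buffer during
// render-app setup and fetch its binding when building a bind group.
// `MyInstanceData` stands in for a hypothetical `GpuArrayBufferable` type.
//
//     let buffer = BatchedInstanceBuffer::<MyInstanceData>::new(&render_device);
//     if let Some(binding) = buffer.instance_data_binding() {
//         // ... use `binding` as the resource of a bind group entry ...
//     }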

/// A system that clears out the [`BatchedInstanceBuffer`], if it exists, so
/// that it can be refilled for the new frame.
pub fn clear_batched_cpu_instance_buffers<GBD>(
    cpu_batched_instance_buffer: Option<ResMut<BatchedInstanceBuffer<GBD::BufferData>>>,
) where
    GBD: GetBatchData,
{
    if let Some(mut cpu_batched_instance_buffer) = cpu_batched_instance_buffer {
        cpu_batched_instance_buffer.clear();
    }
}

/// A system that batches the items in each sorted render phase and writes
/// their per-instance data into the [`BatchedInstanceBuffer`], when GPU
/// preprocessing isn't in use.
pub fn batch_and_prepare_sorted_render_phase<I, GBD>(
    batched_instance_buffer: ResMut<BatchedInstanceBuffer<GBD::BufferData>>,
    mut phases: ResMut<ViewSortedRenderPhases<I>>,
    param: StaticSystemParam<GBD::Param>,
) where
    I: CachedRenderPipelinePhaseItem + SortedPhaseItem,
    GBD: GetBatchData,
{
    let system_param_item = param.into_inner();

    let batched_instance_buffer = batched_instance_buffer.into_inner();

    for phase in phases.values_mut() {
        super::batch_and_prepare_sorted_render_phase::<I, GBD>(phase, |item| {
            // Push this item's instance data and record where it landed.
            let (buffer_data, compare_data) =
                GBD::get_batch_data(&system_param_item, (item.entity(), item.main_entity()))?;
            let buffer_index = batched_instance_buffer.push(buffer_data);

            let index = buffer_index.index;
            let (batch_range, extra_index) = item.batch_range_and_extra_index_mut();
            *batch_range = index..index + 1;
            *extra_index = PhaseItemExtraIndex::maybe_dynamic_offset(buffer_index.dynamic_offset);

            compare_data
        });
    }
}
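
// Note: the closure above gives each item a one-instance `batch_range`; the
// shared `super::batch_and_prepare_sorted_render_phase` helper is what merges
// adjacent items whose returned compare data matches into larger batches.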

/// A system that batches the items in each binned render phase and writes
/// their per-instance data into the [`BatchedInstanceBuffer`], when GPU
/// preprocessing isn't in use.
pub fn batch_and_prepare_binned_render_phase<BPI, GFBD>(
    gpu_array_buffer: ResMut<BatchedInstanceBuffer<GFBD::BufferData>>,
    mut phases: ResMut<ViewBinnedRenderPhases<BPI>>,
    param: StaticSystemParam<GFBD::Param>,
) where
    BPI: BinnedPhaseItem,
    GFBD: GetFullBatchData,
{
    let gpu_array_buffer = gpu_array_buffer.into_inner();
    let system_param_item = param.into_inner();

    for phase in phases.values_mut() {
        // Handle the batchable meshes, bin by bin.
        for bin in phase.batchable_meshes.values_mut() {
            let mut batch_set: SmallVec<[BinnedRenderPhaseBatch; 1]> = smallvec![];
            for main_entity in bin.entities().keys() {
                let Some(buffer_data) =
                    GFBD::get_binned_batch_data(&system_param_item, *main_entity)
                else {
                    continue;
                };
                let instance = gpu_array_buffer.push(buffer_data);

                // If this instance doesn't directly follow the previous batch,
                // or carries a different extra index (dynamic offset), start a
                // new, initially empty batch.
                if !batch_set.last().is_some_and(|batch| {
                    batch.instance_range.end == instance.index
                        && batch.extra_index
                            == PhaseItemExtraIndex::maybe_dynamic_offset(instance.dynamic_offset)
                }) {
                    batch_set.push(BinnedRenderPhaseBatch {
                        representative_entity: (Entity::PLACEHOLDER, *main_entity),
                        instance_range: instance.index..instance.index,
                        extra_index: PhaseItemExtraIndex::maybe_dynamic_offset(
                            instance.dynamic_offset,
                        ),
                    });
                }

                // Extend the current batch to cover this instance.
                if let Some(batch) = batch_set.last_mut() {
                    batch.instance_range.end = instance.index + 1;
                }
            }

            match phase.batch_sets {
                BinnedRenderPhaseBatchSets::DynamicUniforms(ref mut batch_sets) => {
                    batch_sets.push(batch_set);
                }
                BinnedRenderPhaseBatchSets::Direct(_)
                | BinnedRenderPhaseBatchSets::MultidrawIndirect { .. } => {
                    error!(
                        "Dynamic uniform batch sets should be used when GPU preprocessing is off"
                    );
                }
            }
        }

        // Handle the unbatchable meshes: each one gets its own buffer index.
        for unbatchables in phase.unbatchable_meshes.values_mut() {
            for main_entity in unbatchables.entities.keys() {
                let Some(buffer_data) =
                    GFBD::get_binned_batch_data(&system_param_item, *main_entity)
                else {
                    continue;
                };
                let instance = gpu_array_buffer.push(buffer_data);
                unbatchables.buffer_indices.add(instance.into());
            }
        }
    }
}

/// A system that writes the CPU-side [`BatchedInstanceBuffer`] to the GPU.
pub fn write_batched_instance_buffer<GBD>(
    render_device: Res<RenderDevice>,
    render_queue: Res<RenderQueue>,
    mut cpu_batched_instance_buffer: ResMut<BatchedInstanceBuffer<GBD::BufferData>>,
) where
    GBD: GetBatchData,
{
    cpu_batched_instance_buffer.write_buffer(&render_device, &render_queue);
}
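
// A minimal scheduling sketch (not part of this module), assuming a
// hypothetical `MyPhaseItem` (a sorted, cached-pipeline phase item) and
// `MyGetBatchData` (a `GetBatchData` implementation); the exact `RenderSet`
// names may differ between Bevy versions:
//
//     render_app.add_systems(
//         Render,
//         (
//             batch_and_prepare_sorted_render_phase::<MyPhaseItem, MyGetBatchData>
//                 .in_set(RenderSet::PrepareResources),
//             write_batched_instance_buffer::<MyGetBatchData>
//                 .in_set(RenderSet::PrepareResourcesFlush),
//             clear_batched_cpu_instance_buffers::<MyGetBatchData>
//                 .in_set(RenderSet::Cleanup),
//         ),
//     );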