bevy_pbr/render/
gpu_preprocess.rs

1//! GPU mesh preprocessing.
2//!
3//! This is an optional pass that uses a compute shader to reduce the amount of
4//! data that has to be transferred from the CPU to the GPU. When enabled,
5//! instead of transferring [`MeshUniform`]s to the GPU, we transfer the smaller
6//! [`MeshInputUniform`]s instead and use the GPU to calculate the remaining
7//! derived fields in [`MeshUniform`].
8
9use core::num::{NonZero, NonZeroU64};
10
11use bevy_app::{App, Plugin};
12use bevy_asset::{load_internal_asset, weak_handle, Handle};
13use bevy_core_pipeline::{
14    core_3d::graph::{Core3d, Node3d},
15    experimental::mip_generation::ViewDepthPyramid,
16    prepass::{DepthPrepass, PreviousViewData, PreviousViewUniformOffset, PreviousViewUniforms},
17};
18use bevy_derive::{Deref, DerefMut};
19use bevy_ecs::{
20    component::Component,
21    entity::Entity,
22    prelude::resource_exists,
23    query::{Has, Or, QueryState, With, Without},
24    resource::Resource,
25    schedule::IntoScheduleConfigs as _,
26    system::{lifetimeless::Read, Commands, Query, Res, ResMut},
27    world::{FromWorld, World},
28};
29use bevy_render::batching::gpu_preprocessing::{
30    GpuPreprocessingMode, IndirectParametersGpuMetadata, UntypedPhaseIndirectParametersBuffers,
31};
32use bevy_render::{
33    batching::gpu_preprocessing::{
34        BatchedInstanceBuffers, GpuOcclusionCullingWorkItemBuffers, GpuPreprocessingSupport,
35        IndirectBatchSet, IndirectParametersBuffers, IndirectParametersCpuMetadata,
36        IndirectParametersIndexed, IndirectParametersNonIndexed,
37        LatePreprocessWorkItemIndirectParameters, PreprocessWorkItem, PreprocessWorkItemBuffers,
38        UntypedPhaseBatchedInstanceBuffers,
39    },
40    experimental::occlusion_culling::OcclusionCulling,
41    render_graph::{Node, NodeRunError, RenderGraphApp, RenderGraphContext},
42    render_resource::{
43        binding_types::{storage_buffer, storage_buffer_read_only, texture_2d, uniform_buffer},
44        BindGroup, BindGroupEntries, BindGroupLayout, BindingResource, Buffer, BufferBinding,
45        CachedComputePipelineId, ComputePassDescriptor, ComputePipelineDescriptor,
46        DynamicBindGroupLayoutEntries, PipelineCache, PushConstantRange, RawBufferVec, Shader,
47        ShaderStages, ShaderType, SpecializedComputePipeline, SpecializedComputePipelines,
48        TextureSampleType, UninitBufferVec,
49    },
50    renderer::{RenderContext, RenderDevice, RenderQueue},
51    settings::WgpuFeatures,
52    view::{ExtractedView, NoIndirectDrawing, ViewUniform, ViewUniformOffset, ViewUniforms},
53    Render, RenderApp, RenderSet,
54};
55use bevy_utils::TypeIdMap;
56use bitflags::bitflags;
57use smallvec::{smallvec, SmallVec};
58use tracing::warn;
59
60use crate::{
61    graph::NodePbr, MeshCullingData, MeshCullingDataBuffer, MeshInputUniform, MeshUniform,
62};
63
64use super::{ShadowView, ViewLightEntities};
65
/// The handle to the `mesh_preprocess.wgsl` compute shader.
///
/// Loaded in [`GpuMeshPreprocessPlugin::build`] via `load_internal_asset!`.
pub const MESH_PREPROCESS_SHADER_HANDLE: Handle<Shader> =
    weak_handle!("c8579292-cf92-43b5-9c5a-ec5bd4e44d12");
/// The handle to the `reset_indirect_batch_sets.wgsl` compute shader.
///
/// Loaded in [`GpuMeshPreprocessPlugin::build`] via `load_internal_asset!`.
pub const RESET_INDIRECT_BATCH_SETS_SHADER_HANDLE: Handle<Shader> =
    weak_handle!("045fb176-58e2-4e76-b241-7688d761bb23");
/// The handle to the `build_indirect_params.wgsl` compute shader.
///
/// Loaded in [`GpuMeshPreprocessPlugin::build`] via `load_internal_asset!`.
pub const BUILD_INDIRECT_PARAMS_SHADER_HANDLE: Handle<Shader> =
    weak_handle!("133b01f0-3eaf-4590-9ee9-f0cf91a00b71");
75
/// The GPU workgroup size.
///
/// Dispatch counts are computed as `work_item_count.div_ceil(WORKGROUP_SIZE)`.
/// NOTE(review): presumably this must match the `@workgroup_size` declared in
/// the preprocessing shaders — confirm against `mesh_preprocess.wgsl`.
const WORKGROUP_SIZE: usize = 64;
78
/// A plugin that builds mesh uniforms on GPU.
///
/// This will only be added if the platform supports compute shaders (e.g. not
/// on WebGL 2).
pub struct GpuMeshPreprocessPlugin {
    /// Whether we're building [`MeshUniform`]s on GPU.
    ///
    /// This requires compute shader support and so will be forcibly disabled if
    /// the platform doesn't support those. (See [`GpuMeshPreprocessPlugin`]'s
    /// `finish` method, which also checks [`GpuPreprocessingSupport`] at
    /// runtime and bails out if GPU preprocessing is unavailable.)
    pub use_gpu_instance_buffer_builder: bool,
}
90
/// The render node that clears out the GPU-side indirect metadata buffers.
///
/// This is only used when indirect drawing is enabled. It zeroes the
/// [`IndirectParametersGpuMetadata`] regions of both the indexed and
/// non-indexed buffers for every phase before the preprocessing passes run.
#[derive(Default)]
pub struct ClearIndirectParametersMetadataNode;
96
/// The render node for the first mesh preprocessing pass.
///
/// This pass runs a compute shader to cull meshes outside the view frustum (if
/// that wasn't done by the CPU), cull meshes that weren't visible last frame
/// (if occlusion culling is on), transform them, and, if indirect drawing is
/// on, populate indirect draw parameter metadata for the subsequent
/// [`EarlyPrepassBuildIndirectParametersNode`].
pub struct EarlyGpuPreprocessNode {
    /// All views eligible for preprocessing, excluding those opted out via
    /// [`SkipGpuPreprocess`].
    ///
    /// The bind groups and view uniform offset are `Option`s because they may
    /// not have been prepared for a given view; such views are skipped.
    view_query: QueryState<
        (
            Read<ExtractedView>,
            Option<Read<PreprocessBindGroups>>,
            Option<Read<ViewUniformOffset>>,
            Has<NoIndirectDrawing>,
            Has<OcclusionCulling>,
        ),
        Without<SkipGpuPreprocess>,
    >,
    /// Looks up the [`ViewLightEntities`] of the graph's view so that the
    /// associated light views are preprocessed in the same pass.
    main_view_query: QueryState<Read<ViewLightEntities>>,
}
117
/// The render node for the second mesh preprocessing pass.
///
/// This pass runs a compute shader to cull meshes outside the view frustum (if
/// that wasn't done by the CPU), cull meshes that were neither visible last
/// frame nor visible this frame (if occlusion culling is on), transform them,
/// and, if indirect drawing is on, populate the indirect draw parameter
/// metadata for the subsequent [`LatePrepassBuildIndirectParametersNode`].
pub struct LateGpuPreprocessNode {
    /// The views to process in the late pass.
    ///
    /// Unlike [`EarlyGpuPreprocessNode`], this only matches views that use
    /// indirect drawing, occlusion culling, and a depth prepass, because the
    /// late pass only exists as the second phase of two-phase occlusion
    /// culling.
    view_query: QueryState<
        (
            Read<ExtractedView>,
            Read<PreprocessBindGroups>,
            Read<ViewUniformOffset>,
        ),
        (
            Without<SkipGpuPreprocess>,
            Without<NoIndirectDrawing>,
            With<OcclusionCulling>,
            With<DepthPrepass>,
        ),
    >,
}
140
/// The render node for the part of the indirect parameter building pass that
/// draws the meshes visible from the previous frame.
///
/// This node runs a compute shader on the output of the
/// [`EarlyGpuPreprocessNode`] in order to transform the
/// [`IndirectParametersGpuMetadata`] into properly-formatted
/// [`IndirectParametersIndexed`] and [`IndirectParametersNonIndexed`].
pub struct EarlyPrepassBuildIndirectParametersNode {
    /// Indirectly-drawn views that render an early pass: either a view with a
    /// depth prepass or a shadow view.
    view_query: QueryState<
        Read<PreprocessBindGroups>,
        (
            Without<SkipGpuPreprocess>,
            Without<NoIndirectDrawing>,
            Or<(With<DepthPrepass>, With<ShadowView>)>,
        ),
    >,
}
158
/// The render node for the part of the indirect parameter building pass that
/// draws the meshes that are potentially visible on this frame but weren't
/// visible on the previous frame.
///
/// This node runs a compute shader on the output of the
/// [`LateGpuPreprocessNode`] in order to transform the
/// [`IndirectParametersGpuMetadata`] into properly-formatted
/// [`IndirectParametersIndexed`] and [`IndirectParametersNonIndexed`].
pub struct LatePrepassBuildIndirectParametersNode {
    /// Like [`EarlyPrepassBuildIndirectParametersNode`]'s query, but further
    /// restricted to views with occlusion culling enabled, since the late
    /// phase only exists for two-phase occlusion culling.
    view_query: QueryState<
        Read<PreprocessBindGroups>,
        (
            Without<SkipGpuPreprocess>,
            Without<NoIndirectDrawing>,
            Or<(With<DepthPrepass>, With<ShadowView>)>,
            With<OcclusionCulling>,
        ),
    >,
}
178
/// The render node for the part of the indirect parameter building pass that
/// draws all meshes, both those that are newly-visible on this frame and those
/// that were visible last frame.
///
/// This node runs a compute shader on the output of the
/// [`EarlyGpuPreprocessNode`] and [`LateGpuPreprocessNode`] in order to
/// transform the [`IndirectParametersGpuMetadata`] into properly-formatted
/// [`IndirectParametersIndexed`] and [`IndirectParametersNonIndexed`].
pub struct MainBuildIndirectParametersNode {
    /// All indirectly-drawn views, with no prepass/shadow restriction: the
    /// main phase runs for every view that isn't opted out.
    view_query: QueryState<
        Read<PreprocessBindGroups>,
        (Without<SkipGpuPreprocess>, Without<NoIndirectDrawing>),
    >,
}
193
/// The compute shader pipelines for the GPU mesh preprocessing and indirect
/// parameter building passes.
///
/// The `pipeline_id` of each contained pipeline is filled in by
/// `prepare_preprocess_pipelines` during the [`RenderSet::Prepare`] phase.
#[derive(Resource)]
pub struct PreprocessPipelines {
    /// The pipeline used for CPU culling. This pipeline doesn't populate
    /// indirect parameter metadata.
    pub direct_preprocess: PreprocessPipeline,
    /// The pipeline used for mesh preprocessing when GPU frustum culling is in
    /// use, but occlusion culling isn't.
    ///
    /// This pipeline populates indirect parameter metadata.
    pub gpu_frustum_culling_preprocess: PreprocessPipeline,
    /// The pipeline used for the first phase of occlusion culling.
    ///
    /// This pipeline culls, transforms meshes, and populates indirect parameter
    /// metadata.
    pub early_gpu_occlusion_culling_preprocess: PreprocessPipeline,
    /// The pipeline used for the second phase of occlusion culling.
    ///
    /// This pipeline culls, transforms meshes, and populates indirect parameter
    /// metadata.
    pub late_gpu_occlusion_culling_preprocess: PreprocessPipeline,
    /// The pipeline that builds indirect draw parameters for indexed meshes,
    /// when frustum culling is enabled but occlusion culling *isn't* enabled.
    pub gpu_frustum_culling_build_indexed_indirect_params: BuildIndirectParametersPipeline,
    /// The pipeline that builds indirect draw parameters for non-indexed
    /// meshes, when frustum culling is enabled but occlusion culling *isn't*
    /// enabled.
    pub gpu_frustum_culling_build_non_indexed_indirect_params: BuildIndirectParametersPipeline,
    /// Compute shader pipelines for the early prepass phase that draws meshes
    /// visible in the previous frame.
    pub early_phase: PreprocessPhasePipelines,
    /// Compute shader pipelines for the late prepass phase that draws meshes
    /// that weren't visible in the previous frame, but became visible this
    /// frame.
    pub late_phase: PreprocessPhasePipelines,
    /// Compute shader pipelines for the main color phase.
    pub main_phase: PreprocessPhasePipelines,
}
233
/// Compute shader pipelines for a specific phase: early, late, or main.
///
/// The distinction between these phases is relevant for occlusion culling.
#[derive(Clone)]
pub struct PreprocessPhasePipelines {
    /// The pipeline that resets the indirect draw counts used in
    /// `multi_draw_indirect_count` to 0 in preparation for a new pass.
    pub reset_indirect_batch_sets: ResetIndirectBatchSetsPipeline,
    /// The pipeline used for indexed indirect parameter building.
    ///
    /// This pipeline converts indirect parameter metadata into indexed indirect
    /// parameters.
    pub gpu_occlusion_culling_build_indexed_indirect_params: BuildIndirectParametersPipeline,
    /// The pipeline used for non-indexed indirect parameter building.
    ///
    /// This pipeline converts indirect parameter metadata into non-indexed
    /// indirect parameters.
    pub gpu_occlusion_culling_build_non_indexed_indirect_params: BuildIndirectParametersPipeline,
}
253
/// The pipeline for the GPU mesh preprocessing shader
/// (`mesh_preprocess.wgsl`).
pub struct PreprocessPipeline {
    /// The bind group layout for the compute shader.
    pub bind_group_layout: BindGroupLayout,
    /// The pipeline ID for the compute shader.
    ///
    /// This gets filled in `prepare_preprocess_pipelines`. Until then it's
    /// `None`, and nodes that need the pipeline skip their work.
    pub pipeline_id: Option<CachedComputePipelineId>,
}
263
/// The pipeline for the batch set count reset shader
/// (`reset_indirect_batch_sets.wgsl`).
///
/// This shader resets the indirect batch set count to 0 for each view. It runs
/// in between every phase (early, late, and main).
#[derive(Clone)]
pub struct ResetIndirectBatchSetsPipeline {
    /// The bind group layout for the compute shader.
    pub bind_group_layout: BindGroupLayout,
    /// The pipeline ID for the compute shader.
    ///
    /// This gets filled in `prepare_preprocess_pipelines`.
    pub pipeline_id: Option<CachedComputePipelineId>,
}
277
/// The pipeline for the indirect parameter building shader
/// (`build_indirect_params.wgsl`).
#[derive(Clone)]
pub struct BuildIndirectParametersPipeline {
    /// The bind group layout for the compute shader.
    pub bind_group_layout: BindGroupLayout,
    /// The pipeline ID for the compute shader.
    ///
    /// This gets filled in `prepare_preprocess_pipelines`.
    pub pipeline_id: Option<CachedComputePipelineId>,
}
288
bitflags! {
    /// Specifies variants of the mesh preprocessing shader.
    #[derive(Clone, Copy, PartialEq, Eq, Hash)]
    pub struct PreprocessPipelineKey: u8 {
        /// Whether GPU frustum culling is in use.
        ///
        /// This defines `FRUSTUM_CULLING` in the shader.
        const FRUSTUM_CULLING = 1;
        /// Whether GPU two-phase occlusion culling is in use.
        ///
        /// This defines `OCCLUSION_CULLING` in the shader.
        const OCCLUSION_CULLING = 2;
        /// Whether this is the early phase of GPU two-phase occlusion culling.
        ///
        /// This defines `EARLY_PHASE` in the shader.
        const EARLY_PHASE = 4;
    }

    /// Specifies variants of the indirect parameter building shader.
    #[derive(Clone, Copy, PartialEq, Eq, Hash)]
    pub struct BuildIndirectParametersPipelineKey: u8 {
        /// Whether the indirect parameter building shader is processing indexed
        /// meshes (those that have index buffers).
        ///
        /// This defines `INDEXED` in the shader.
        const INDEXED = 1;
        /// Whether the GPU and driver supports `multi_draw_indirect_count`.
        ///
        /// This defines `MULTI_DRAW_INDIRECT_COUNT_SUPPORTED` in the shader.
        const MULTI_DRAW_INDIRECT_COUNT_SUPPORTED = 2;
        /// Whether GPU two-phase occlusion culling is in use.
        ///
        /// This defines `OCCLUSION_CULLING` in the shader.
        const OCCLUSION_CULLING = 4;
        /// Whether this is the early phase of GPU two-phase occlusion culling.
        ///
        /// This defines `EARLY_PHASE` in the shader.
        const EARLY_PHASE = 8;
        /// Whether this is the late phase of GPU two-phase occlusion culling.
        ///
        /// This defines `LATE_PHASE` in the shader.
        const LATE_PHASE = 16;
        /// Whether this is the phase that runs after the early and late phases,
        /// and right before the main drawing logic, when GPU two-phase
        /// occlusion culling is in use.
        ///
        /// This defines `MAIN_PHASE` in the shader.
        const MAIN_PHASE = 32;
    }
}
339
/// The compute shader bind group for the mesh preprocessing pass for each
/// render phase.
///
/// This goes on the view. It maps the [`core::any::TypeId`] of a render phase
/// (e.g. [`bevy_core_pipeline::core_3d::Opaque3d`]) to the
/// [`PhasePreprocessBindGroups`] for that phase.
#[derive(Component, Clone, Deref, DerefMut)]
pub struct PreprocessBindGroups(pub TypeIdMap<PhasePreprocessBindGroups>);
348
/// The compute shader bind group for the mesh preprocessing step for a single
/// render phase on a single view.
#[derive(Clone)]
pub enum PhasePreprocessBindGroups {
    /// The bind group used for the single invocation of the compute shader when
    /// indirect drawing is *not* being used.
    ///
    /// Because direct drawing doesn't require splitting the meshes into indexed
    /// and non-indexed meshes, there's only one bind group in this case.
    Direct(BindGroup),

    /// The bind groups used for the compute shader when indirect drawing is
    /// being used, but occlusion culling isn't being used.
    ///
    /// Because indirect drawing requires splitting the meshes into indexed and
    /// non-indexed meshes, there are two bind groups here.
    IndirectFrustumCulling {
        /// The bind group for indexed meshes.
        indexed: Option<BindGroup>,
        /// The bind group for non-indexed meshes.
        non_indexed: Option<BindGroup>,
    },

    /// The bind groups used for the compute shader when both indirect drawing
    /// and occlusion culling are being used.
    ///
    /// Because indirect drawing requires splitting the meshes into indexed and
    /// non-indexed meshes, and because occlusion culling requires splitting
    /// this phase into early and late versions, there are four bind groups
    /// here.
    IndirectOcclusionCulling {
        /// The bind group for indexed meshes during the early mesh
        /// preprocessing phase.
        early_indexed: Option<BindGroup>,
        /// The bind group for non-indexed meshes during the early mesh
        /// preprocessing phase.
        early_non_indexed: Option<BindGroup>,
        /// The bind group for indexed meshes during the late mesh preprocessing
        /// phase.
        late_indexed: Option<BindGroup>,
        /// The bind group for non-indexed meshes during the late mesh
        /// preprocessing phase.
        late_non_indexed: Option<BindGroup>,
    },
}
394
/// The bind groups for the compute shaders that reset indirect draw counts and
/// build indirect parameters.
///
/// There's one set of bind groups for each phase. Phases are keyed off their
/// [`core::any::TypeId`].
#[derive(Resource, Default, Deref, DerefMut)]
pub struct BuildIndirectParametersBindGroups(pub TypeIdMap<PhaseBuildIndirectParametersBindGroups>);
402
403impl BuildIndirectParametersBindGroups {
404    /// Creates a new, empty [`BuildIndirectParametersBindGroups`] table.
405    pub fn new() -> BuildIndirectParametersBindGroups {
406        Self::default()
407    }
408}
409
/// The per-phase set of bind groups for the compute shaders that reset indirect
/// draw counts and build indirect parameters.
///
/// NOTE(review): each field is `Option` — presumably `None` when the phase has
/// no meshes of that class (indexed vs. non-indexed); confirm against
/// `prepare_preprocess_bind_groups`.
pub struct PhaseBuildIndirectParametersBindGroups {
    /// The bind group for the `reset_indirect_batch_sets.wgsl` shader, for
    /// indexed meshes.
    reset_indexed_indirect_batch_sets: Option<BindGroup>,
    /// The bind group for the `reset_indirect_batch_sets.wgsl` shader, for
    /// non-indexed meshes.
    reset_non_indexed_indirect_batch_sets: Option<BindGroup>,
    /// The bind group for the `build_indirect_params.wgsl` shader, for indexed
    /// meshes.
    build_indexed_indirect: Option<BindGroup>,
    /// The bind group for the `build_indirect_params.wgsl` shader, for
    /// non-indexed meshes.
    build_non_indexed_indirect: Option<BindGroup>,
}
426
/// Marker component that stops the GPU preprocessing nodes from generating
/// buffers for this view.
///
/// This is useful to avoid duplicating effort if the bind group is shared
/// between views.
#[derive(Component, Default)]
pub struct SkipGpuPreprocess;
431
impl Plugin for GpuMeshPreprocessPlugin {
    // Register the three internal compute shaders under their weak handles so
    // that the pipelines created in `finish` can reference them.
    fn build(&self, app: &mut App) {
        load_internal_asset!(
            app,
            MESH_PREPROCESS_SHADER_HANDLE,
            "mesh_preprocess.wgsl",
            Shader::from_wgsl
        );
        load_internal_asset!(
            app,
            RESET_INDIRECT_BATCH_SETS_SHADER_HANDLE,
            "reset_indirect_batch_sets.wgsl",
            Shader::from_wgsl
        );
        load_internal_asset!(
            app,
            BUILD_INDIRECT_PARAMS_SHADER_HANDLE,
            "build_indirect_params.wgsl",
            Shader::from_wgsl
        );
    }

    // Set up the render-app resources, systems, and render graph. This runs in
    // `finish` (not `build`) because it needs `GpuPreprocessingSupport`, which
    // is only available once the render device exists.
    fn finish(&self, app: &mut App) {
        let Some(render_app) = app.get_sub_app_mut(RenderApp) else {
            return;
        };

        // This plugin does nothing if GPU instance buffer building isn't in
        // use.
        let gpu_preprocessing_support = render_app.world().resource::<GpuPreprocessingSupport>();
        if !self.use_gpu_instance_buffer_builder || !gpu_preprocessing_support.is_available() {
            return;
        }

        render_app
            .init_resource::<PreprocessPipelines>()
            .init_resource::<SpecializedComputePipelines<PreprocessPipeline>>()
            .init_resource::<SpecializedComputePipelines<ResetIndirectBatchSetsPipeline>>()
            .init_resource::<SpecializedComputePipelines<BuildIndirectParametersPipeline>>()
            .add_systems(
                Render,
                (
                    prepare_preprocess_pipelines.in_set(RenderSet::Prepare),
                    prepare_preprocess_bind_groups
                        .run_if(resource_exists::<BatchedInstanceBuffers<
                            MeshUniform,
                            MeshInputUniform
                        >>)
                        .in_set(RenderSet::PrepareBindGroups),
                    write_mesh_culling_data_buffer.in_set(RenderSet::PrepareResourcesFlush),
                ),
            )
            // Register the compute nodes in the `Core3d` graph.
            .add_render_graph_node::<ClearIndirectParametersMetadataNode>(
                Core3d,
                NodePbr::ClearIndirectParametersMetadata
            )
            .add_render_graph_node::<EarlyGpuPreprocessNode>(Core3d, NodePbr::EarlyGpuPreprocess)
            .add_render_graph_node::<LateGpuPreprocessNode>(Core3d, NodePbr::LateGpuPreprocess)
            .add_render_graph_node::<EarlyPrepassBuildIndirectParametersNode>(
                Core3d,
                NodePbr::EarlyPrepassBuildIndirectParameters,
            )
            .add_render_graph_node::<LatePrepassBuildIndirectParametersNode>(
                Core3d,
                NodePbr::LatePrepassBuildIndirectParameters,
            )
            .add_render_graph_node::<MainBuildIndirectParametersNode>(
                Core3d,
                NodePbr::MainBuildIndirectParameters,
            )
            // Main ordering chain: clear metadata → early preprocess → early
            // indirect params → early prepasses → depth downsample → late
            // preprocess → late indirect params → late prepasses → main
            // indirect params → main pass.
            .add_render_graph_edges(
                Core3d,
                (
                    NodePbr::ClearIndirectParametersMetadata,
                    NodePbr::EarlyGpuPreprocess,
                    NodePbr::EarlyPrepassBuildIndirectParameters,
                    Node3d::EarlyPrepass,
                    Node3d::EarlyDeferredPrepass,
                    Node3d::EarlyDownsampleDepth,
                    NodePbr::LateGpuPreprocess,
                    NodePbr::LatePrepassBuildIndirectParameters,
                    Node3d::LatePrepass,
                    Node3d::LateDeferredPrepass,
                    NodePbr::MainBuildIndirectParameters,
                    Node3d::StartMainPass,
                ),
            // Additional edges ordering the shadow passes relative to the
            // indirect-parameter building nodes.
            ).add_render_graph_edges(
                Core3d,
                (
                    NodePbr::EarlyPrepassBuildIndirectParameters,
                    NodePbr::EarlyShadowPass,
                    Node3d::EarlyDownsampleDepth,
                )
            ).add_render_graph_edges(
                Core3d,
                (
                    NodePbr::LatePrepassBuildIndirectParameters,
                    NodePbr::LateShadowPass,
                    NodePbr::MainBuildIndirectParameters,
                )
            );
    }
}
535
536impl Node for ClearIndirectParametersMetadataNode {
537    fn run<'w>(
538        &self,
539        _: &mut RenderGraphContext,
540        render_context: &mut RenderContext<'w>,
541        world: &'w World,
542    ) -> Result<(), NodeRunError> {
543        let Some(indirect_parameters_buffers) = world.get_resource::<IndirectParametersBuffers>()
544        else {
545            return Ok(());
546        };
547
548        // Clear out each indexed and non-indexed GPU-side buffer.
549        for phase_indirect_parameters_buffers in indirect_parameters_buffers.values() {
550            if let Some(indexed_gpu_metadata_buffer) = phase_indirect_parameters_buffers
551                .indexed
552                .gpu_metadata_buffer()
553            {
554                render_context.command_encoder().clear_buffer(
555                    indexed_gpu_metadata_buffer,
556                    0,
557                    Some(
558                        phase_indirect_parameters_buffers.indexed.batch_count() as u64
559                            * size_of::<IndirectParametersGpuMetadata>() as u64,
560                    ),
561                );
562            }
563
564            if let Some(non_indexed_gpu_metadata_buffer) = phase_indirect_parameters_buffers
565                .non_indexed
566                .gpu_metadata_buffer()
567            {
568                render_context.command_encoder().clear_buffer(
569                    non_indexed_gpu_metadata_buffer,
570                    0,
571                    Some(
572                        phase_indirect_parameters_buffers.non_indexed.batch_count() as u64
573                            * size_of::<IndirectParametersGpuMetadata>() as u64,
574                    ),
575                );
576            }
577        }
578
579        Ok(())
580    }
581}
582
583impl FromWorld for EarlyGpuPreprocessNode {
584    fn from_world(world: &mut World) -> Self {
585        Self {
586            view_query: QueryState::new(world),
587            main_view_query: QueryState::new(world),
588        }
589    }
590}
591
592impl Node for EarlyGpuPreprocessNode {
593    fn update(&mut self, world: &mut World) {
594        self.view_query.update_archetypes(world);
595        self.main_view_query.update_archetypes(world);
596    }
597
598    fn run<'w>(
599        &self,
600        graph: &mut RenderGraphContext,
601        render_context: &mut RenderContext<'w>,
602        world: &'w World,
603    ) -> Result<(), NodeRunError> {
604        // Grab the [`BatchedInstanceBuffers`].
605        let batched_instance_buffers =
606            world.resource::<BatchedInstanceBuffers<MeshUniform, MeshInputUniform>>();
607
608        let pipeline_cache = world.resource::<PipelineCache>();
609        let preprocess_pipelines = world.resource::<PreprocessPipelines>();
610
611        let mut compute_pass =
612            render_context
613                .command_encoder()
614                .begin_compute_pass(&ComputePassDescriptor {
615                    label: Some("early mesh preprocessing"),
616                    timestamp_writes: None,
617                });
618
619        let mut all_views: SmallVec<[_; 8]> = SmallVec::new();
620        all_views.push(graph.view_entity());
621        if let Ok(shadow_cascade_views) =
622            self.main_view_query.get_manual(world, graph.view_entity())
623        {
624            all_views.extend(shadow_cascade_views.lights.iter().copied());
625        }
626
627        // Run the compute passes.
628
629        for view_entity in all_views {
630            let Ok((
631                view,
632                bind_groups,
633                view_uniform_offset,
634                no_indirect_drawing,
635                occlusion_culling,
636            )) = self.view_query.get_manual(world, view_entity)
637            else {
638                continue;
639            };
640
641            let Some(bind_groups) = bind_groups else {
642                continue;
643            };
644            let Some(view_uniform_offset) = view_uniform_offset else {
645                continue;
646            };
647
648            // Select the right pipeline, depending on whether GPU culling is in
649            // use.
650            let maybe_pipeline_id = if no_indirect_drawing {
651                preprocess_pipelines.direct_preprocess.pipeline_id
652            } else if occlusion_culling {
653                preprocess_pipelines
654                    .early_gpu_occlusion_culling_preprocess
655                    .pipeline_id
656            } else {
657                preprocess_pipelines
658                    .gpu_frustum_culling_preprocess
659                    .pipeline_id
660            };
661
662            // Fetch the pipeline.
663            let Some(preprocess_pipeline_id) = maybe_pipeline_id else {
664                warn!("The build mesh uniforms pipeline wasn't ready");
665                continue;
666            };
667
668            let Some(preprocess_pipeline) =
669                pipeline_cache.get_compute_pipeline(preprocess_pipeline_id)
670            else {
671                // This will happen while the pipeline is being compiled and is fine.
672                continue;
673            };
674
675            compute_pass.set_pipeline(preprocess_pipeline);
676
677            // Loop over each render phase.
678            for (phase_type_id, batched_phase_instance_buffers) in
679                &batched_instance_buffers.phase_instance_buffers
680            {
681                // Grab the work item buffers for this view.
682                let Some(work_item_buffers) = batched_phase_instance_buffers
683                    .work_item_buffers
684                    .get(&view.retained_view_entity)
685                else {
686                    continue;
687                };
688
689                // Fetch the bind group for the render phase.
690                let Some(phase_bind_groups) = bind_groups.get(phase_type_id) else {
691                    continue;
692                };
693
694                // Make sure the mesh preprocessing shader has access to the
695                // view info it needs to do culling and motion vector
696                // computation.
697                let dynamic_offsets = [view_uniform_offset.offset];
698
699                // Are we drawing directly or indirectly?
700                match *phase_bind_groups {
701                    PhasePreprocessBindGroups::Direct(ref bind_group) => {
702                        // Invoke the mesh preprocessing shader to transform
703                        // meshes only, but not cull.
704                        let PreprocessWorkItemBuffers::Direct(work_item_buffer) = work_item_buffers
705                        else {
706                            continue;
707                        };
708                        compute_pass.set_bind_group(0, bind_group, &dynamic_offsets);
709                        let workgroup_count = work_item_buffer.len().div_ceil(WORKGROUP_SIZE);
710                        if workgroup_count > 0 {
711                            compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1);
712                        }
713                    }
714
715                    PhasePreprocessBindGroups::IndirectFrustumCulling {
716                        indexed: ref maybe_indexed_bind_group,
717                        non_indexed: ref maybe_non_indexed_bind_group,
718                    }
719                    | PhasePreprocessBindGroups::IndirectOcclusionCulling {
720                        early_indexed: ref maybe_indexed_bind_group,
721                        early_non_indexed: ref maybe_non_indexed_bind_group,
722                        ..
723                    } => {
724                        // Invoke the mesh preprocessing shader to transform and
725                        // cull the meshes.
726                        let PreprocessWorkItemBuffers::Indirect {
727                            indexed: indexed_buffer,
728                            non_indexed: non_indexed_buffer,
729                            ..
730                        } = work_item_buffers
731                        else {
732                            continue;
733                        };
734
735                        // Transform and cull indexed meshes if there are any.
736                        if let Some(indexed_bind_group) = maybe_indexed_bind_group {
737                            if let PreprocessWorkItemBuffers::Indirect {
738                                gpu_occlusion_culling:
739                                    Some(GpuOcclusionCullingWorkItemBuffers {
740                                        late_indirect_parameters_indexed_offset,
741                                        ..
742                                    }),
743                                ..
744                            } = *work_item_buffers
745                            {
746                                compute_pass.set_push_constants(
747                                    0,
748                                    bytemuck::bytes_of(&late_indirect_parameters_indexed_offset),
749                                );
750                            }
751
752                            compute_pass.set_bind_group(0, indexed_bind_group, &dynamic_offsets);
753                            let workgroup_count = indexed_buffer.len().div_ceil(WORKGROUP_SIZE);
754                            if workgroup_count > 0 {
755                                compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1);
756                            }
757                        }
758
759                        // Transform and cull non-indexed meshes if there are any.
760                        if let Some(non_indexed_bind_group) = maybe_non_indexed_bind_group {
761                            if let PreprocessWorkItemBuffers::Indirect {
762                                gpu_occlusion_culling:
763                                    Some(GpuOcclusionCullingWorkItemBuffers {
764                                        late_indirect_parameters_non_indexed_offset,
765                                        ..
766                                    }),
767                                ..
768                            } = *work_item_buffers
769                            {
770                                compute_pass.set_push_constants(
771                                    0,
772                                    bytemuck::bytes_of(
773                                        &late_indirect_parameters_non_indexed_offset,
774                                    ),
775                                );
776                            }
777
778                            compute_pass.set_bind_group(
779                                0,
780                                non_indexed_bind_group,
781                                &dynamic_offsets,
782                            );
783                            let workgroup_count = non_indexed_buffer.len().div_ceil(WORKGROUP_SIZE);
784                            if workgroup_count > 0 {
785                                compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1);
786                            }
787                        }
788                    }
789                }
790            }
791        }
792
793        Ok(())
794    }
795}
796
797impl FromWorld for EarlyPrepassBuildIndirectParametersNode {
798    fn from_world(world: &mut World) -> Self {
799        Self {
800            view_query: QueryState::new(world),
801        }
802    }
803}
804
805impl FromWorld for LatePrepassBuildIndirectParametersNode {
806    fn from_world(world: &mut World) -> Self {
807        Self {
808            view_query: QueryState::new(world),
809        }
810    }
811}
812
813impl FromWorld for MainBuildIndirectParametersNode {
814    fn from_world(world: &mut World) -> Self {
815        Self {
816            view_query: QueryState::new(world),
817        }
818    }
819}
820
821impl FromWorld for LateGpuPreprocessNode {
822    fn from_world(world: &mut World) -> Self {
823        Self {
824            view_query: QueryState::new(world),
825        }
826    }
827}
828
829impl Node for LateGpuPreprocessNode {
830    fn update(&mut self, world: &mut World) {
831        self.view_query.update_archetypes(world);
832    }
833
834    fn run<'w>(
835        &self,
836        _: &mut RenderGraphContext,
837        render_context: &mut RenderContext<'w>,
838        world: &'w World,
839    ) -> Result<(), NodeRunError> {
840        // Grab the [`BatchedInstanceBuffers`].
841        let batched_instance_buffers =
842            world.resource::<BatchedInstanceBuffers<MeshUniform, MeshInputUniform>>();
843
844        let pipeline_cache = world.resource::<PipelineCache>();
845        let preprocess_pipelines = world.resource::<PreprocessPipelines>();
846
847        let mut compute_pass =
848            render_context
849                .command_encoder()
850                .begin_compute_pass(&ComputePassDescriptor {
851                    label: Some("late mesh preprocessing"),
852                    timestamp_writes: None,
853                });
854
855        // Run the compute passes.
856        for (view, bind_groups, view_uniform_offset) in self.view_query.iter_manual(world) {
857            let maybe_pipeline_id = preprocess_pipelines
858                .late_gpu_occlusion_culling_preprocess
859                .pipeline_id;
860
861            // Fetch the pipeline.
862            let Some(preprocess_pipeline_id) = maybe_pipeline_id else {
863                warn!("The build mesh uniforms pipeline wasn't ready");
864                return Ok(());
865            };
866
867            let Some(preprocess_pipeline) =
868                pipeline_cache.get_compute_pipeline(preprocess_pipeline_id)
869            else {
870                // This will happen while the pipeline is being compiled and is fine.
871                return Ok(());
872            };
873
874            compute_pass.set_pipeline(preprocess_pipeline);
875
876            // Loop over each phase. Because we built the phases in parallel,
877            // each phase has a separate set of instance buffers.
878            for (phase_type_id, batched_phase_instance_buffers) in
879                &batched_instance_buffers.phase_instance_buffers
880            {
881                let UntypedPhaseBatchedInstanceBuffers {
882                    ref work_item_buffers,
883                    ref late_indexed_indirect_parameters_buffer,
884                    ref late_non_indexed_indirect_parameters_buffer,
885                    ..
886                } = *batched_phase_instance_buffers;
887
888                // Grab the work item buffers for this view.
889                let Some(phase_work_item_buffers) =
890                    work_item_buffers.get(&view.retained_view_entity)
891                else {
892                    continue;
893                };
894
895                let (
896                    PreprocessWorkItemBuffers::Indirect {
897                        gpu_occlusion_culling:
898                            Some(GpuOcclusionCullingWorkItemBuffers {
899                                late_indirect_parameters_indexed_offset,
900                                late_indirect_parameters_non_indexed_offset,
901                                ..
902                            }),
903                        ..
904                    },
905                    Some(PhasePreprocessBindGroups::IndirectOcclusionCulling {
906                        late_indexed: maybe_late_indexed_bind_group,
907                        late_non_indexed: maybe_late_non_indexed_bind_group,
908                        ..
909                    }),
910                    Some(late_indexed_indirect_parameters_buffer),
911                    Some(late_non_indexed_indirect_parameters_buffer),
912                ) = (
913                    phase_work_item_buffers,
914                    bind_groups.get(phase_type_id),
915                    late_indexed_indirect_parameters_buffer.buffer(),
916                    late_non_indexed_indirect_parameters_buffer.buffer(),
917                )
918                else {
919                    continue;
920                };
921
922                let mut dynamic_offsets: SmallVec<[u32; 1]> = smallvec![];
923                dynamic_offsets.push(view_uniform_offset.offset);
924
925                // If there's no space reserved for work items, then don't
926                // bother doing the dispatch, as there can't possibly be any
927                // meshes of the given class (indexed or non-indexed) in this
928                // phase.
929
930                // Transform and cull indexed meshes if there are any.
931                if let Some(late_indexed_bind_group) = maybe_late_indexed_bind_group {
932                    compute_pass.set_push_constants(
933                        0,
934                        bytemuck::bytes_of(late_indirect_parameters_indexed_offset),
935                    );
936
937                    compute_pass.set_bind_group(0, late_indexed_bind_group, &dynamic_offsets);
938                    compute_pass.dispatch_workgroups_indirect(
939                        late_indexed_indirect_parameters_buffer,
940                        (*late_indirect_parameters_indexed_offset as u64)
941                            * (size_of::<LatePreprocessWorkItemIndirectParameters>() as u64),
942                    );
943                }
944
945                // Transform and cull non-indexed meshes if there are any.
946                if let Some(late_non_indexed_bind_group) = maybe_late_non_indexed_bind_group {
947                    compute_pass.set_push_constants(
948                        0,
949                        bytemuck::bytes_of(late_indirect_parameters_non_indexed_offset),
950                    );
951
952                    compute_pass.set_bind_group(0, late_non_indexed_bind_group, &dynamic_offsets);
953                    compute_pass.dispatch_workgroups_indirect(
954                        late_non_indexed_indirect_parameters_buffer,
955                        (*late_indirect_parameters_non_indexed_offset as u64)
956                            * (size_of::<LatePreprocessWorkItemIndirectParameters>() as u64),
957                    );
958                }
959            }
960        }
961
962        Ok(())
963    }
964}
965
966impl Node for EarlyPrepassBuildIndirectParametersNode {
967    fn update(&mut self, world: &mut World) {
968        self.view_query.update_archetypes(world);
969    }
970
971    fn run<'w>(
972        &self,
973        _: &mut RenderGraphContext,
974        render_context: &mut RenderContext<'w>,
975        world: &'w World,
976    ) -> Result<(), NodeRunError> {
977        let preprocess_pipelines = world.resource::<PreprocessPipelines>();
978
979        // If there are no views with a depth prepass enabled, we don't need to
980        // run this.
981        if self.view_query.iter_manual(world).next().is_none() {
982            return Ok(());
983        }
984
985        run_build_indirect_parameters_node(
986            render_context,
987            world,
988            &preprocess_pipelines.early_phase,
989            "early prepass indirect parameters building",
990        )
991    }
992}
993
994impl Node for LatePrepassBuildIndirectParametersNode {
995    fn update(&mut self, world: &mut World) {
996        self.view_query.update_archetypes(world);
997    }
998
999    fn run<'w>(
1000        &self,
1001        _: &mut RenderGraphContext,
1002        render_context: &mut RenderContext<'w>,
1003        world: &'w World,
1004    ) -> Result<(), NodeRunError> {
1005        let preprocess_pipelines = world.resource::<PreprocessPipelines>();
1006
1007        // If there are no views with occlusion culling enabled, we don't need
1008        // to run this.
1009        if self.view_query.iter_manual(world).next().is_none() {
1010            return Ok(());
1011        }
1012
1013        run_build_indirect_parameters_node(
1014            render_context,
1015            world,
1016            &preprocess_pipelines.late_phase,
1017            "late prepass indirect parameters building",
1018        )
1019    }
1020}
1021
1022impl Node for MainBuildIndirectParametersNode {
1023    fn update(&mut self, world: &mut World) {
1024        self.view_query.update_archetypes(world);
1025    }
1026
1027    fn run<'w>(
1028        &self,
1029        _: &mut RenderGraphContext,
1030        render_context: &mut RenderContext<'w>,
1031        world: &'w World,
1032    ) -> Result<(), NodeRunError> {
1033        let preprocess_pipelines = world.resource::<PreprocessPipelines>();
1034
1035        run_build_indirect_parameters_node(
1036            render_context,
1037            world,
1038            &preprocess_pipelines.main_phase,
1039            "main indirect parameters building",
1040        )
1041    }
1042}
1043
1044fn run_build_indirect_parameters_node(
1045    render_context: &mut RenderContext,
1046    world: &World,
1047    preprocess_phase_pipelines: &PreprocessPhasePipelines,
1048    label: &'static str,
1049) -> Result<(), NodeRunError> {
1050    let Some(build_indirect_params_bind_groups) =
1051        world.get_resource::<BuildIndirectParametersBindGroups>()
1052    else {
1053        return Ok(());
1054    };
1055
1056    let pipeline_cache = world.resource::<PipelineCache>();
1057    let indirect_parameters_buffers = world.resource::<IndirectParametersBuffers>();
1058
1059    let mut compute_pass =
1060        render_context
1061            .command_encoder()
1062            .begin_compute_pass(&ComputePassDescriptor {
1063                label: Some(label),
1064                timestamp_writes: None,
1065            });
1066
1067    // Fetch the pipeline.
1068    let (
1069        Some(reset_indirect_batch_sets_pipeline_id),
1070        Some(build_indexed_indirect_params_pipeline_id),
1071        Some(build_non_indexed_indirect_params_pipeline_id),
1072    ) = (
1073        preprocess_phase_pipelines
1074            .reset_indirect_batch_sets
1075            .pipeline_id,
1076        preprocess_phase_pipelines
1077            .gpu_occlusion_culling_build_indexed_indirect_params
1078            .pipeline_id,
1079        preprocess_phase_pipelines
1080            .gpu_occlusion_culling_build_non_indexed_indirect_params
1081            .pipeline_id,
1082    )
1083    else {
1084        warn!("The build indirect parameters pipelines weren't ready");
1085        return Ok(());
1086    };
1087
1088    let (
1089        Some(reset_indirect_batch_sets_pipeline),
1090        Some(build_indexed_indirect_params_pipeline),
1091        Some(build_non_indexed_indirect_params_pipeline),
1092    ) = (
1093        pipeline_cache.get_compute_pipeline(reset_indirect_batch_sets_pipeline_id),
1094        pipeline_cache.get_compute_pipeline(build_indexed_indirect_params_pipeline_id),
1095        pipeline_cache.get_compute_pipeline(build_non_indexed_indirect_params_pipeline_id),
1096    )
1097    else {
1098        // This will happen while the pipeline is being compiled and is fine.
1099        return Ok(());
1100    };
1101
1102    // Loop over each phase. As each has as separate set of buffers, we need to
1103    // build indirect parameters individually for each phase.
1104    for (phase_type_id, phase_build_indirect_params_bind_groups) in
1105        build_indirect_params_bind_groups.iter()
1106    {
1107        let Some(phase_indirect_parameters_buffers) =
1108            indirect_parameters_buffers.get(phase_type_id)
1109        else {
1110            continue;
1111        };
1112
1113        // Build indexed indirect parameters.
1114        if let (
1115            Some(reset_indexed_indirect_batch_sets_bind_group),
1116            Some(build_indirect_indexed_params_bind_group),
1117        ) = (
1118            &phase_build_indirect_params_bind_groups.reset_indexed_indirect_batch_sets,
1119            &phase_build_indirect_params_bind_groups.build_indexed_indirect,
1120        ) {
1121            compute_pass.set_pipeline(reset_indirect_batch_sets_pipeline);
1122            compute_pass.set_bind_group(0, reset_indexed_indirect_batch_sets_bind_group, &[]);
1123            let workgroup_count = phase_indirect_parameters_buffers
1124                .batch_set_count(true)
1125                .div_ceil(WORKGROUP_SIZE);
1126            if workgroup_count > 0 {
1127                compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1);
1128            }
1129
1130            compute_pass.set_pipeline(build_indexed_indirect_params_pipeline);
1131            compute_pass.set_bind_group(0, build_indirect_indexed_params_bind_group, &[]);
1132            let workgroup_count = phase_indirect_parameters_buffers
1133                .indexed
1134                .batch_count()
1135                .div_ceil(WORKGROUP_SIZE);
1136            if workgroup_count > 0 {
1137                compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1);
1138            }
1139        }
1140
1141        // Build non-indexed indirect parameters.
1142        if let (
1143            Some(reset_non_indexed_indirect_batch_sets_bind_group),
1144            Some(build_indirect_non_indexed_params_bind_group),
1145        ) = (
1146            &phase_build_indirect_params_bind_groups.reset_non_indexed_indirect_batch_sets,
1147            &phase_build_indirect_params_bind_groups.build_non_indexed_indirect,
1148        ) {
1149            compute_pass.set_pipeline(reset_indirect_batch_sets_pipeline);
1150            compute_pass.set_bind_group(0, reset_non_indexed_indirect_batch_sets_bind_group, &[]);
1151            let workgroup_count = phase_indirect_parameters_buffers
1152                .batch_set_count(false)
1153                .div_ceil(WORKGROUP_SIZE);
1154            if workgroup_count > 0 {
1155                compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1);
1156            }
1157
1158            compute_pass.set_pipeline(build_non_indexed_indirect_params_pipeline);
1159            compute_pass.set_bind_group(0, build_indirect_non_indexed_params_bind_group, &[]);
1160            let workgroup_count = phase_indirect_parameters_buffers
1161                .non_indexed
1162                .batch_count()
1163                .div_ceil(WORKGROUP_SIZE);
1164            if workgroup_count > 0 {
1165                compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1);
1166            }
1167        }
1168    }
1169
1170    Ok(())
1171}
1172
1173impl PreprocessPipelines {
1174    /// Returns true if the preprocessing and indirect parameters pipelines have
1175    /// been loaded or false otherwise.
1176    pub(crate) fn pipelines_are_loaded(
1177        &self,
1178        pipeline_cache: &PipelineCache,
1179        preprocessing_support: &GpuPreprocessingSupport,
1180    ) -> bool {
1181        match preprocessing_support.max_supported_mode {
1182            GpuPreprocessingMode::None => false,
1183            GpuPreprocessingMode::PreprocessingOnly => {
1184                self.direct_preprocess.is_loaded(pipeline_cache)
1185                    && self
1186                        .gpu_frustum_culling_preprocess
1187                        .is_loaded(pipeline_cache)
1188            }
1189            GpuPreprocessingMode::Culling => {
1190                self.direct_preprocess.is_loaded(pipeline_cache)
1191                    && self
1192                        .gpu_frustum_culling_preprocess
1193                        .is_loaded(pipeline_cache)
1194                    && self
1195                        .early_gpu_occlusion_culling_preprocess
1196                        .is_loaded(pipeline_cache)
1197                    && self
1198                        .late_gpu_occlusion_culling_preprocess
1199                        .is_loaded(pipeline_cache)
1200                    && self
1201                        .gpu_frustum_culling_build_indexed_indirect_params
1202                        .is_loaded(pipeline_cache)
1203                    && self
1204                        .gpu_frustum_culling_build_non_indexed_indirect_params
1205                        .is_loaded(pipeline_cache)
1206                    && self.early_phase.is_loaded(pipeline_cache)
1207                    && self.late_phase.is_loaded(pipeline_cache)
1208                    && self.main_phase.is_loaded(pipeline_cache)
1209            }
1210        }
1211    }
1212}
1213
1214impl PreprocessPhasePipelines {
1215    fn is_loaded(&self, pipeline_cache: &PipelineCache) -> bool {
1216        self.reset_indirect_batch_sets.is_loaded(pipeline_cache)
1217            && self
1218                .gpu_occlusion_culling_build_indexed_indirect_params
1219                .is_loaded(pipeline_cache)
1220            && self
1221                .gpu_occlusion_culling_build_non_indexed_indirect_params
1222                .is_loaded(pipeline_cache)
1223    }
1224}
1225
1226impl PreprocessPipeline {
1227    fn is_loaded(&self, pipeline_cache: &PipelineCache) -> bool {
1228        self.pipeline_id
1229            .is_some_and(|pipeline_id| pipeline_cache.get_compute_pipeline(pipeline_id).is_some())
1230    }
1231}
1232
1233impl ResetIndirectBatchSetsPipeline {
1234    fn is_loaded(&self, pipeline_cache: &PipelineCache) -> bool {
1235        self.pipeline_id
1236            .is_some_and(|pipeline_id| pipeline_cache.get_compute_pipeline(pipeline_id).is_some())
1237    }
1238}
1239
1240impl BuildIndirectParametersPipeline {
1241    /// Returns true if this pipeline has been loaded into the pipeline cache or
1242    /// false otherwise.
1243    fn is_loaded(&self, pipeline_cache: &PipelineCache) -> bool {
1244        self.pipeline_id
1245            .is_some_and(|pipeline_id| pipeline_cache.get_compute_pipeline(pipeline_id).is_some())
1246    }
1247}
1248
1249impl SpecializedComputePipeline for PreprocessPipeline {
1250    type Key = PreprocessPipelineKey;
1251
1252    fn specialize(&self, key: Self::Key) -> ComputePipelineDescriptor {
1253        let mut shader_defs = vec!["WRITE_INDIRECT_PARAMETERS_METADATA".into()];
1254        if key.contains(PreprocessPipelineKey::FRUSTUM_CULLING) {
1255            shader_defs.push("INDIRECT".into());
1256            shader_defs.push("FRUSTUM_CULLING".into());
1257        }
1258        if key.contains(PreprocessPipelineKey::OCCLUSION_CULLING) {
1259            shader_defs.push("OCCLUSION_CULLING".into());
1260            if key.contains(PreprocessPipelineKey::EARLY_PHASE) {
1261                shader_defs.push("EARLY_PHASE".into());
1262            } else {
1263                shader_defs.push("LATE_PHASE".into());
1264            }
1265        }
1266
1267        ComputePipelineDescriptor {
1268            label: Some(
1269                format!(
1270                    "mesh preprocessing ({})",
1271                    if key.contains(
1272                        PreprocessPipelineKey::OCCLUSION_CULLING
1273                            | PreprocessPipelineKey::EARLY_PHASE
1274                    ) {
1275                        "early GPU occlusion culling"
1276                    } else if key.contains(PreprocessPipelineKey::OCCLUSION_CULLING) {
1277                        "late GPU occlusion culling"
1278                    } else if key.contains(PreprocessPipelineKey::FRUSTUM_CULLING) {
1279                        "GPU frustum culling"
1280                    } else {
1281                        "direct"
1282                    }
1283                )
1284                .into(),
1285            ),
1286            layout: vec![self.bind_group_layout.clone()],
1287            push_constant_ranges: if key.contains(PreprocessPipelineKey::OCCLUSION_CULLING) {
1288                vec![PushConstantRange {
1289                    stages: ShaderStages::COMPUTE,
1290                    range: 0..4,
1291                }]
1292            } else {
1293                vec![]
1294            },
1295            shader: MESH_PREPROCESS_SHADER_HANDLE,
1296            shader_defs,
1297            entry_point: "main".into(),
1298            zero_initialize_workgroup_memory: false,
1299        }
1300    }
1301}
1302
impl FromWorld for PreprocessPipelines {
    // Builds all bind group layouts for the preprocessing and indirect
    // parameters shaders. No pipelines are compiled here: every `pipeline_id`
    // starts as `None` and is filled in later during specialization.
    fn from_world(world: &mut World) -> Self {
        let render_device = world.resource::<RenderDevice>();

        // GPU culling bind group parameters are a superset of those in the CPU
        // culling (direct) shader.
        let direct_bind_group_layout_entries = preprocess_direct_bind_group_layout_entries();
        let gpu_frustum_culling_bind_group_layout_entries = gpu_culling_bind_group_layout_entries();
        // The early occlusion culling variant additionally takes a writable
        // `PreprocessWorkItem` buffer at binding 11 (presumably the work items
        // it emits for the late phase — confirm against `mesh_preprocess.wgsl`).
        let gpu_early_occlusion_culling_bind_group_layout_entries =
            gpu_occlusion_culling_bind_group_layout_entries().extend_with_indices(((
                11,
                storage_buffer::<PreprocessWorkItem>(/*has_dynamic_offset=*/ false),
            ),));
        let gpu_late_occlusion_culling_bind_group_layout_entries =
            gpu_occlusion_culling_bind_group_layout_entries();

        // The batch set reset shader only needs the indirect batch sets.
        let reset_indirect_batch_sets_bind_group_layout_entries =
            DynamicBindGroupLayoutEntries::sequential(
                ShaderStages::COMPUTE,
                (storage_buffer::<IndirectBatchSet>(false),),
            );

        // Indexed and non-indexed bind group parameters share all the bind
        // group layout entries except the final one.
        let build_indexed_indirect_params_bind_group_layout_entries =
            build_indirect_params_bind_group_layout_entries()
                .extend_sequential((storage_buffer::<IndirectParametersIndexed>(false),));
        let build_non_indexed_indirect_params_bind_group_layout_entries =
            build_indirect_params_bind_group_layout_entries()
                .extend_sequential((storage_buffer::<IndirectParametersNonIndexed>(false),));

        // Create the bind group layouts.
        let direct_bind_group_layout = render_device.create_bind_group_layout(
            "build mesh uniforms direct bind group layout",
            &direct_bind_group_layout_entries,
        );
        let gpu_frustum_culling_bind_group_layout = render_device.create_bind_group_layout(
            "build mesh uniforms GPU frustum culling bind group layout",
            &gpu_frustum_culling_bind_group_layout_entries,
        );
        let gpu_early_occlusion_culling_bind_group_layout = render_device.create_bind_group_layout(
            "build mesh uniforms GPU early occlusion culling bind group layout",
            &gpu_early_occlusion_culling_bind_group_layout_entries,
        );
        let gpu_late_occlusion_culling_bind_group_layout = render_device.create_bind_group_layout(
            "build mesh uniforms GPU late occlusion culling bind group layout",
            &gpu_late_occlusion_culling_bind_group_layout_entries,
        );
        let reset_indirect_batch_sets_bind_group_layout = render_device.create_bind_group_layout(
            "reset indirect batch sets bind group layout",
            &reset_indirect_batch_sets_bind_group_layout_entries,
        );
        let build_indexed_indirect_params_bind_group_layout = render_device
            .create_bind_group_layout(
                "build indexed indirect parameters bind group layout",
                &build_indexed_indirect_params_bind_group_layout_entries,
            );
        let build_non_indexed_indirect_params_bind_group_layout = render_device
            .create_bind_group_layout(
                "build non-indexed indirect parameters bind group layout",
                &build_non_indexed_indirect_params_bind_group_layout_entries,
            );

        // All three phases (early, late, main) share the same layouts, so
        // one template is cloned for each phase below.
        let preprocess_phase_pipelines = PreprocessPhasePipelines {
            reset_indirect_batch_sets: ResetIndirectBatchSetsPipeline {
                bind_group_layout: reset_indirect_batch_sets_bind_group_layout.clone(),
                pipeline_id: None,
            },
            gpu_occlusion_culling_build_indexed_indirect_params: BuildIndirectParametersPipeline {
                bind_group_layout: build_indexed_indirect_params_bind_group_layout.clone(),
                pipeline_id: None,
            },
            gpu_occlusion_culling_build_non_indexed_indirect_params:
                BuildIndirectParametersPipeline {
                    bind_group_layout: build_non_indexed_indirect_params_bind_group_layout.clone(),
                    pipeline_id: None,
                },
        };

        PreprocessPipelines {
            direct_preprocess: PreprocessPipeline {
                bind_group_layout: direct_bind_group_layout,
                pipeline_id: None,
            },
            gpu_frustum_culling_preprocess: PreprocessPipeline {
                bind_group_layout: gpu_frustum_culling_bind_group_layout,
                pipeline_id: None,
            },
            early_gpu_occlusion_culling_preprocess: PreprocessPipeline {
                bind_group_layout: gpu_early_occlusion_culling_bind_group_layout,
                pipeline_id: None,
            },
            late_gpu_occlusion_culling_preprocess: PreprocessPipeline {
                bind_group_layout: gpu_late_occlusion_culling_bind_group_layout,
                pipeline_id: None,
            },
            gpu_frustum_culling_build_indexed_indirect_params: BuildIndirectParametersPipeline {
                bind_group_layout: build_indexed_indirect_params_bind_group_layout.clone(),
                pipeline_id: None,
            },
            gpu_frustum_culling_build_non_indexed_indirect_params:
                BuildIndirectParametersPipeline {
                    bind_group_layout: build_non_indexed_indirect_params_bind_group_layout.clone(),
                    pipeline_id: None,
                },
            early_phase: preprocess_phase_pipelines.clone(),
            late_phase: preprocess_phase_pipelines.clone(),
            main_phase: preprocess_phase_pipelines.clone(),
        }
    }
}
1414
/// Returns the bind group layout entries for the direct variant of the mesh
/// preprocessing shader, which transforms meshes without culling them.
fn preprocess_direct_bind_group_layout_entries() -> DynamicBindGroupLayoutEntries {
    DynamicBindGroupLayoutEntries::new_with_indices(
        ShaderStages::COMPUTE,
        (
            // `view` (the only entry with a dynamic offset; see the
            // `dynamic_offsets` arrays in the preprocessing nodes)
            (
                0,
                uniform_buffer::<ViewUniform>(/* has_dynamic_offset= */ true),
            ),
            // `current_input`
            (3, storage_buffer_read_only::<MeshInputUniform>(false)),
            // `previous_input`
            (4, storage_buffer_read_only::<MeshInputUniform>(false)),
            // `indices`
            (5, storage_buffer_read_only::<PreprocessWorkItem>(false)),
            // `output`
            (6, storage_buffer::<MeshUniform>(false)),
        ),
    )
}
1435
/// Returns the first 4 bind group layout entries, shared between all
/// invocations of the indirect parameters building shader.
///
/// Callers extend these with one final entry for either the indexed or the
/// non-indexed indirect parameters output buffer.
fn build_indirect_params_bind_group_layout_entries() -> DynamicBindGroupLayoutEntries {
    DynamicBindGroupLayoutEntries::new_with_indices(
        ShaderStages::COMPUTE,
        (
            (0, storage_buffer_read_only::<MeshInputUniform>(false)),
            (
                1,
                storage_buffer_read_only::<IndirectParametersCpuMetadata>(false),
            ),
            (
                2,
                storage_buffer_read_only::<IndirectParametersGpuMetadata>(false),
            ),
            (3, storage_buffer::<IndirectBatchSet>(false)),
        ),
    )
}
1455
/// Returns the bind group layout entries for the variant of the mesh
/// preprocessing shader that performs GPU frustum culling.
///
/// (The doc comment previously here described the pipeline specialization
/// system, which is documented on [`prepare_preprocess_pipelines`] instead.)
fn gpu_culling_bind_group_layout_entries() -> DynamicBindGroupLayoutEntries {
    // GPU culling bind group parameters are a superset of those in the CPU
    // culling (direct) shader.
    preprocess_direct_bind_group_layout_entries().extend_with_indices((
        // `indirect_parameters_cpu_metadata`
        (
            7,
            storage_buffer_read_only::<IndirectParametersCpuMetadata>(
                /* has_dynamic_offset= */ false,
            ),
        ),
        // `indirect_parameters_gpu_metadata`
        (
            8,
            storage_buffer::<IndirectParametersGpuMetadata>(/* has_dynamic_offset= */ false),
        ),
        // `mesh_culling_data`
        (
            9,
            storage_buffer_read_only::<MeshCullingData>(/* has_dynamic_offset= */ false),
        ),
    ))
}
1481
/// Returns the bind group layout entries for the variant of the mesh
/// preprocessing shader that performs GPU occlusion culling.
///
/// These are a superset of the GPU frustum culling entries.
fn gpu_occlusion_culling_bind_group_layout_entries() -> DynamicBindGroupLayoutEntries {
    gpu_culling_bind_group_layout_entries().extend_with_indices((
        // The previous frame's view data, used for occlusion tests.
        (
            2,
            uniform_buffer::<PreviousViewData>(/*has_dynamic_offset=*/ false),
        ),
        // The depth pyramid texture (bound to `ViewDepthPyramid::all_mips` by
        // the bind group builder later in this file).
        (
            10,
            texture_2d(TextureSampleType::Float { filterable: true }),
        ),
        // The indirect dispatch parameters for the late preprocessing pass
        // (read-write).
        (
            12,
            storage_buffer::<LatePreprocessWorkItemIndirectParameters>(
                /*has_dynamic_offset=*/ false,
            ),
        ),
    ))
}
1500
/// A system that specializes the mesh preprocessing, batch-set-resetting, and
/// indirect-parameters-building compute pipelines if necessary.
pub fn prepare_preprocess_pipelines(
    pipeline_cache: Res<PipelineCache>,
    render_device: Res<RenderDevice>,
    mut specialized_preprocess_pipelines: ResMut<SpecializedComputePipelines<PreprocessPipeline>>,
    mut specialized_reset_indirect_batch_sets_pipelines: ResMut<
        SpecializedComputePipelines<ResetIndirectBatchSetsPipeline>,
    >,
    mut specialized_build_indirect_parameters_pipelines: ResMut<
        SpecializedComputePipelines<BuildIndirectParametersPipeline>,
    >,
    preprocess_pipelines: ResMut<PreprocessPipelines>,
    gpu_preprocessing_support: Res<GpuPreprocessingSupport>,
) {
    let preprocess_pipelines = preprocess_pipelines.into_inner();

    // The direct and frustum-culling preprocessing variants are always
    // prepared.
    preprocess_pipelines.direct_preprocess.prepare(
        &pipeline_cache,
        &mut specialized_preprocess_pipelines,
        PreprocessPipelineKey::empty(),
    );
    preprocess_pipelines.gpu_frustum_culling_preprocess.prepare(
        &pipeline_cache,
        &mut specialized_preprocess_pipelines,
        PreprocessPipelineKey::FRUSTUM_CULLING,
    );

    // The occlusion-culling preprocessing variants are only prepared if the
    // platform supports GPU culling.
    if gpu_preprocessing_support.is_culling_supported() {
        preprocess_pipelines
            .early_gpu_occlusion_culling_preprocess
            .prepare(
                &pipeline_cache,
                &mut specialized_preprocess_pipelines,
                PreprocessPipelineKey::FRUSTUM_CULLING
                    | PreprocessPipelineKey::OCCLUSION_CULLING
                    | PreprocessPipelineKey::EARLY_PHASE,
            );
        preprocess_pipelines
            .late_gpu_occlusion_culling_preprocess
            .prepare(
                &pipeline_cache,
                &mut specialized_preprocess_pipelines,
                PreprocessPipelineKey::FRUSTUM_CULLING | PreprocessPipelineKey::OCCLUSION_CULLING,
            );
    }

    let mut build_indirect_parameters_pipeline_key = BuildIndirectParametersPipelineKey::empty();

    // If the GPU and driver support `multi_draw_indirect_count`, tell the
    // shader that.
    if render_device
        .wgpu_device()
        .features()
        .contains(WgpuFeatures::MULTI_DRAW_INDIRECT_COUNT)
    {
        build_indirect_parameters_pipeline_key
            .insert(BuildIndirectParametersPipelineKey::MULTI_DRAW_INDIRECT_COUNT_SUPPORTED);
    }

    // The frustum-culling indirect-parameters-building pipelines (indexed and
    // non-indexed) are always prepared.
    preprocess_pipelines
        .gpu_frustum_culling_build_indexed_indirect_params
        .prepare(
            &pipeline_cache,
            &mut specialized_build_indirect_parameters_pipelines,
            build_indirect_parameters_pipeline_key | BuildIndirectParametersPipelineKey::INDEXED,
        );
    preprocess_pipelines
        .gpu_frustum_culling_build_non_indexed_indirect_params
        .prepare(
            &pipeline_cache,
            &mut specialized_build_indirect_parameters_pipelines,
            build_indirect_parameters_pipeline_key,
        );

    // Everything below is specific to GPU occlusion culling.
    if !gpu_preprocessing_support.is_culling_supported() {
        return;
    }

    // Prepare the per-phase (early/late/main) batch-set-resetting and
    // indirect-parameters-building pipelines used for occlusion culling.
    for (preprocess_phase_pipelines, build_indirect_parameters_phase_pipeline_key) in [
        (
            &mut preprocess_pipelines.early_phase,
            BuildIndirectParametersPipelineKey::EARLY_PHASE,
        ),
        (
            &mut preprocess_pipelines.late_phase,
            BuildIndirectParametersPipelineKey::LATE_PHASE,
        ),
        (
            &mut preprocess_pipelines.main_phase,
            BuildIndirectParametersPipelineKey::MAIN_PHASE,
        ),
    ] {
        preprocess_phase_pipelines
            .reset_indirect_batch_sets
            .prepare(
                &pipeline_cache,
                &mut specialized_reset_indirect_batch_sets_pipelines,
            );
        preprocess_phase_pipelines
            .gpu_occlusion_culling_build_indexed_indirect_params
            .prepare(
                &pipeline_cache,
                &mut specialized_build_indirect_parameters_pipelines,
                build_indirect_parameters_pipeline_key
                    | build_indirect_parameters_phase_pipeline_key
                    | BuildIndirectParametersPipelineKey::INDEXED
                    | BuildIndirectParametersPipelineKey::OCCLUSION_CULLING,
            );
        preprocess_phase_pipelines
            .gpu_occlusion_culling_build_non_indexed_indirect_params
            .prepare(
                &pipeline_cache,
                &mut specialized_build_indirect_parameters_pipelines,
                build_indirect_parameters_pipeline_key
                    | build_indirect_parameters_phase_pipeline_key
                    | BuildIndirectParametersPipelineKey::OCCLUSION_CULLING,
            );
    }
}
1620
1621impl PreprocessPipeline {
1622    fn prepare(
1623        &mut self,
1624        pipeline_cache: &PipelineCache,
1625        pipelines: &mut SpecializedComputePipelines<PreprocessPipeline>,
1626        key: PreprocessPipelineKey,
1627    ) {
1628        if self.pipeline_id.is_some() {
1629            return;
1630        }
1631
1632        let preprocess_pipeline_id = pipelines.specialize(pipeline_cache, self, key);
1633        self.pipeline_id = Some(preprocess_pipeline_id);
1634    }
1635}
1636
1637impl SpecializedComputePipeline for ResetIndirectBatchSetsPipeline {
1638    type Key = ();
1639
1640    fn specialize(&self, _: Self::Key) -> ComputePipelineDescriptor {
1641        ComputePipelineDescriptor {
1642            label: Some("reset indirect batch sets".into()),
1643            layout: vec![self.bind_group_layout.clone()],
1644            push_constant_ranges: vec![],
1645            shader: RESET_INDIRECT_BATCH_SETS_SHADER_HANDLE,
1646            shader_defs: vec![],
1647            entry_point: "main".into(),
1648            zero_initialize_workgroup_memory: false,
1649        }
1650    }
1651}
1652
1653impl SpecializedComputePipeline for BuildIndirectParametersPipeline {
1654    type Key = BuildIndirectParametersPipelineKey;
1655
1656    fn specialize(&self, key: Self::Key) -> ComputePipelineDescriptor {
1657        let mut shader_defs = vec![];
1658        if key.contains(BuildIndirectParametersPipelineKey::INDEXED) {
1659            shader_defs.push("INDEXED".into());
1660        }
1661        if key.contains(BuildIndirectParametersPipelineKey::MULTI_DRAW_INDIRECT_COUNT_SUPPORTED) {
1662            shader_defs.push("MULTI_DRAW_INDIRECT_COUNT_SUPPORTED".into());
1663        }
1664        if key.contains(BuildIndirectParametersPipelineKey::OCCLUSION_CULLING) {
1665            shader_defs.push("OCCLUSION_CULLING".into());
1666        }
1667        if key.contains(BuildIndirectParametersPipelineKey::EARLY_PHASE) {
1668            shader_defs.push("EARLY_PHASE".into());
1669        }
1670        if key.contains(BuildIndirectParametersPipelineKey::LATE_PHASE) {
1671            shader_defs.push("LATE_PHASE".into());
1672        }
1673        if key.contains(BuildIndirectParametersPipelineKey::MAIN_PHASE) {
1674            shader_defs.push("MAIN_PHASE".into());
1675        }
1676
1677        let label = format!(
1678            "{} build {}indexed indirect parameters",
1679            if !key.contains(BuildIndirectParametersPipelineKey::OCCLUSION_CULLING) {
1680                "frustum culling"
1681            } else if key.contains(BuildIndirectParametersPipelineKey::EARLY_PHASE) {
1682                "early occlusion culling"
1683            } else if key.contains(BuildIndirectParametersPipelineKey::LATE_PHASE) {
1684                "late occlusion culling"
1685            } else {
1686                "main occlusion culling"
1687            },
1688            if key.contains(BuildIndirectParametersPipelineKey::INDEXED) {
1689                ""
1690            } else {
1691                "non-"
1692            }
1693        );
1694
1695        ComputePipelineDescriptor {
1696            label: Some(label.into()),
1697            layout: vec![self.bind_group_layout.clone()],
1698            push_constant_ranges: vec![],
1699            shader: BUILD_INDIRECT_PARAMS_SHADER_HANDLE,
1700            shader_defs,
1701            entry_point: "main".into(),
1702            zero_initialize_workgroup_memory: false,
1703        }
1704    }
1705}
1706
1707impl ResetIndirectBatchSetsPipeline {
1708    fn prepare(
1709        &mut self,
1710        pipeline_cache: &PipelineCache,
1711        pipelines: &mut SpecializedComputePipelines<ResetIndirectBatchSetsPipeline>,
1712    ) {
1713        if self.pipeline_id.is_some() {
1714            return;
1715        }
1716
1717        let reset_indirect_batch_sets_pipeline_id = pipelines.specialize(pipeline_cache, self, ());
1718        self.pipeline_id = Some(reset_indirect_batch_sets_pipeline_id);
1719    }
1720}
1721
1722impl BuildIndirectParametersPipeline {
1723    fn prepare(
1724        &mut self,
1725        pipeline_cache: &PipelineCache,
1726        pipelines: &mut SpecializedComputePipelines<BuildIndirectParametersPipeline>,
1727        key: BuildIndirectParametersPipelineKey,
1728    ) {
1729        if self.pipeline_id.is_some() {
1730            return;
1731        }
1732
1733        let build_indirect_parameters_pipeline_id = pipelines.specialize(pipeline_cache, self, key);
1734        self.pipeline_id = Some(build_indirect_parameters_pipeline_id);
1735    }
1736}
1737
/// A system that attaches the mesh uniform buffers to the bind groups for the
/// variants of the mesh preprocessing compute shader.
///
/// One [`PreprocessBindGroups`] component, containing a bind group per render
/// phase, is inserted on each view entity. If any phase of any view needs
/// indirect drawing, the bind groups for the indirect-parameters-building
/// shader are created at the end as well.
#[expect(
    clippy::too_many_arguments,
    reason = "it's a system that needs a lot of arguments"
)]
pub fn prepare_preprocess_bind_groups(
    mut commands: Commands,
    views: Query<(Entity, &ExtractedView)>,
    view_depth_pyramids: Query<(&ViewDepthPyramid, &PreviousViewUniformOffset)>,
    render_device: Res<RenderDevice>,
    batched_instance_buffers: Res<BatchedInstanceBuffers<MeshUniform, MeshInputUniform>>,
    indirect_parameters_buffers: Res<IndirectParametersBuffers>,
    mesh_culling_data_buffer: Res<MeshCullingDataBuffer>,
    view_uniforms: Res<ViewUniforms>,
    previous_view_uniforms: Res<PreviousViewUniforms>,
    pipelines: Res<PreprocessPipelines>,
) {
    // Grab the `BatchedInstanceBuffers`.
    let BatchedInstanceBuffers {
        current_input_buffer: current_input_buffer_vec,
        previous_input_buffer: previous_input_buffer_vec,
        phase_instance_buffers,
    } = batched_instance_buffers.into_inner();

    // If either mesh input uniform buffer isn't present on the GPU, there's
    // nothing to bind.
    let (Some(current_input_buffer), Some(previous_input_buffer)) = (
        current_input_buffer_vec.buffer().buffer(),
        previous_input_buffer_vec.buffer().buffer(),
    ) else {
        return;
    };

    // Record whether we have any meshes that are to be drawn indirectly. If we
    // don't, then we can skip building indirect parameters.
    let mut any_indirect = false;

    // Loop over each view.
    for (view_entity, view) in &views {
        let mut bind_groups = TypeIdMap::default();

        // Loop over each phase.
        for (phase_type_id, phase_instance_buffers) in phase_instance_buffers {
            let UntypedPhaseBatchedInstanceBuffers {
                data_buffer: ref data_buffer_vec,
                ref work_item_buffers,
                ref late_indexed_indirect_parameters_buffer,
                ref late_non_indexed_indirect_parameters_buffer,
            } = *phase_instance_buffers;

            // Skip the phase if its output data buffer doesn't exist.
            let Some(data_buffer) = data_buffer_vec.buffer() else {
                continue;
            };

            // Grab the indirect parameters buffers for this phase.
            let Some(phase_indirect_parameters_buffers) =
                indirect_parameters_buffers.get(phase_type_id)
            else {
                continue;
            };

            // Skip the phase if it has no work item buffers for this view.
            let Some(work_item_buffers) = work_item_buffers.get(&view.retained_view_entity) else {
                continue;
            };

            // Create the `PreprocessBindGroupBuilder`.
            let preprocess_bind_group_builder = PreprocessBindGroupBuilder {
                view: view_entity,
                late_indexed_indirect_parameters_buffer,
                late_non_indexed_indirect_parameters_buffer,
                render_device: &render_device,
                phase_indirect_parameters_buffers,
                mesh_culling_data_buffer: &mesh_culling_data_buffer,
                view_uniforms: &view_uniforms,
                previous_view_uniforms: &previous_view_uniforms,
                pipelines: &pipelines,
                current_input_buffer,
                previous_input_buffer,
                data_buffer,
            };

            // Depending on the type of work items we have, construct the
            // appropriate bind groups: direct, indirect with occlusion
            // culling, or indirect with frustum culling only.
            let (was_indirect, bind_group) = match *work_item_buffers {
                PreprocessWorkItemBuffers::Direct(ref work_item_buffer) => (
                    false,
                    preprocess_bind_group_builder
                        .create_direct_preprocess_bind_groups(work_item_buffer),
                ),

                PreprocessWorkItemBuffers::Indirect {
                    indexed: ref indexed_work_item_buffer,
                    non_indexed: ref non_indexed_work_item_buffer,
                    gpu_occlusion_culling: Some(ref gpu_occlusion_culling_work_item_buffers),
                } => (
                    true,
                    preprocess_bind_group_builder
                        .create_indirect_occlusion_culling_preprocess_bind_groups(
                            &view_depth_pyramids,
                            indexed_work_item_buffer,
                            non_indexed_work_item_buffer,
                            gpu_occlusion_culling_work_item_buffers,
                        ),
                ),

                PreprocessWorkItemBuffers::Indirect {
                    indexed: ref indexed_work_item_buffer,
                    non_indexed: ref non_indexed_work_item_buffer,
                    gpu_occlusion_culling: None,
                } => (
                    true,
                    preprocess_bind_group_builder
                        .create_indirect_frustum_culling_preprocess_bind_groups(
                            indexed_work_item_buffer,
                            non_indexed_work_item_buffer,
                        ),
                ),
            };

            // Write that bind group in. Only bind groups that were actually
            // built count toward `any_indirect`.
            if let Some(bind_group) = bind_group {
                any_indirect = any_indirect || was_indirect;
                bind_groups.insert(*phase_type_id, bind_group);
            }
        }

        // Save the bind groups.
        commands
            .entity(view_entity)
            .insert(PreprocessBindGroups(bind_groups));
    }

    // Now, if there were any indirect draw commands, create the bind groups for
    // the indirect parameters building shader.
    if any_indirect {
        create_build_indirect_parameters_bind_groups(
            &mut commands,
            &render_device,
            &pipelines,
            current_input_buffer,
            &indirect_parameters_buffers,
        );
    }
}
1881
/// A temporary structure that stores all the information needed to construct
/// bind groups for the mesh preprocessing shader.
///
/// The `'a` lifetime ties all of these borrows to the system that builds the
/// bind groups ([`prepare_preprocess_bind_groups`]), which constructs one of
/// these per view per phase.
struct PreprocessBindGroupBuilder<'a> {
    /// The render-world entity corresponding to the current view.
    view: Entity,
    /// The indirect compute dispatch parameters buffer for indexed meshes in
    /// the late prepass.
    late_indexed_indirect_parameters_buffer:
        &'a RawBufferVec<LatePreprocessWorkItemIndirectParameters>,
    /// The indirect compute dispatch parameters buffer for non-indexed meshes
    /// in the late prepass.
    late_non_indexed_indirect_parameters_buffer:
        &'a RawBufferVec<LatePreprocessWorkItemIndirectParameters>,
    /// The device.
    render_device: &'a RenderDevice,
    /// The buffers that store indirect draw parameters.
    phase_indirect_parameters_buffers: &'a UntypedPhaseIndirectParametersBuffers,
    /// The GPU buffer that stores the information needed to cull each mesh.
    mesh_culling_data_buffer: &'a MeshCullingDataBuffer,
    /// The GPU buffer that stores information about the view.
    view_uniforms: &'a ViewUniforms,
    /// The GPU buffer that stores information about the view from last frame.
    previous_view_uniforms: &'a PreviousViewUniforms,
    /// The pipelines for the mesh preprocessing shader.
    pipelines: &'a PreprocessPipelines,
    /// The GPU buffer containing the list of [`MeshInputUniform`]s for the
    /// current frame.
    current_input_buffer: &'a Buffer,
    /// The GPU buffer containing the list of [`MeshInputUniform`]s for the
    /// previous frame.
    previous_input_buffer: &'a Buffer,
    /// The GPU buffer containing the list of [`MeshUniform`]s for the current
    /// frame.
    ///
    /// This is the buffer containing the mesh's final transforms that the
    /// shaders will write to.
    data_buffer: &'a Buffer,
}
1920
1921impl<'a> PreprocessBindGroupBuilder<'a> {
1922    /// Creates the bind groups for mesh preprocessing when GPU frustum culling
1923    /// and GPU occlusion culling are both disabled.
1924    fn create_direct_preprocess_bind_groups(
1925        &self,
1926        work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
1927    ) -> Option<PhasePreprocessBindGroups> {
1928        // Don't use `as_entire_binding()` here; the shader reads the array
1929        // length and the underlying buffer may be longer than the actual size
1930        // of the vector.
1931        let work_item_buffer_size = NonZero::<u64>::try_from(
1932            work_item_buffer.len() as u64 * u64::from(PreprocessWorkItem::min_size()),
1933        )
1934        .ok();
1935
1936        Some(PhasePreprocessBindGroups::Direct(
1937            self.render_device.create_bind_group(
1938                "preprocess_direct_bind_group",
1939                &self.pipelines.direct_preprocess.bind_group_layout,
1940                &BindGroupEntries::with_indices((
1941                    (0, self.view_uniforms.uniforms.binding()?),
1942                    (3, self.current_input_buffer.as_entire_binding()),
1943                    (4, self.previous_input_buffer.as_entire_binding()),
1944                    (
1945                        5,
1946                        BindingResource::Buffer(BufferBinding {
1947                            buffer: work_item_buffer.buffer()?,
1948                            offset: 0,
1949                            size: work_item_buffer_size,
1950                        }),
1951                    ),
1952                    (6, self.data_buffer.as_entire_binding()),
1953                )),
1954            ),
1955        ))
1956    }
1957
1958    /// Creates the bind groups for mesh preprocessing when GPU occlusion
1959    /// culling is enabled.
1960    fn create_indirect_occlusion_culling_preprocess_bind_groups(
1961        &self,
1962        view_depth_pyramids: &Query<(&ViewDepthPyramid, &PreviousViewUniformOffset)>,
1963        indexed_work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
1964        non_indexed_work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
1965        gpu_occlusion_culling_work_item_buffers: &GpuOcclusionCullingWorkItemBuffers,
1966    ) -> Option<PhasePreprocessBindGroups> {
1967        let GpuOcclusionCullingWorkItemBuffers {
1968            late_indexed: ref late_indexed_work_item_buffer,
1969            late_non_indexed: ref late_non_indexed_work_item_buffer,
1970            ..
1971        } = *gpu_occlusion_culling_work_item_buffers;
1972
1973        let (view_depth_pyramid, previous_view_uniform_offset) =
1974            view_depth_pyramids.get(self.view).ok()?;
1975
1976        Some(PhasePreprocessBindGroups::IndirectOcclusionCulling {
1977            early_indexed: self.create_indirect_occlusion_culling_early_indexed_bind_group(
1978                view_depth_pyramid,
1979                previous_view_uniform_offset,
1980                indexed_work_item_buffer,
1981                late_indexed_work_item_buffer,
1982            ),
1983
1984            early_non_indexed: self.create_indirect_occlusion_culling_early_non_indexed_bind_group(
1985                view_depth_pyramid,
1986                previous_view_uniform_offset,
1987                non_indexed_work_item_buffer,
1988                late_non_indexed_work_item_buffer,
1989            ),
1990
1991            late_indexed: self.create_indirect_occlusion_culling_late_indexed_bind_group(
1992                view_depth_pyramid,
1993                previous_view_uniform_offset,
1994                late_indexed_work_item_buffer,
1995            ),
1996
1997            late_non_indexed: self.create_indirect_occlusion_culling_late_non_indexed_bind_group(
1998                view_depth_pyramid,
1999                previous_view_uniform_offset,
2000                late_non_indexed_work_item_buffer,
2001            ),
2002        })
2003    }
2004
    /// Creates the bind group for the first phase of mesh preprocessing of
    /// indexed meshes when GPU occlusion culling is enabled.
    ///
    /// Returns [`None`] if any of the required buffers haven't been allocated
    /// on the GPU.
    fn create_indirect_occlusion_culling_early_indexed_bind_group(
        &self,
        view_depth_pyramid: &ViewDepthPyramid,
        previous_view_uniform_offset: &PreviousViewUniformOffset,
        indexed_work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
        late_indexed_work_item_buffer: &UninitBufferVec<PreprocessWorkItem>,
    ) -> Option<BindGroup> {
        let mesh_culling_data_buffer = self.mesh_culling_data_buffer.buffer()?;
        let view_uniforms_binding = self.view_uniforms.uniforms.binding()?;
        let previous_view_buffer = self.previous_view_uniforms.uniforms.buffer()?;

        // Every one of these buffers must be present for the bind group to be
        // built at all.
        match (
            self.phase_indirect_parameters_buffers
                .indexed
                .cpu_metadata_buffer(),
            self.phase_indirect_parameters_buffers
                .indexed
                .gpu_metadata_buffer(),
            indexed_work_item_buffer.buffer(),
            late_indexed_work_item_buffer.buffer(),
            self.late_indexed_indirect_parameters_buffer.buffer(),
        ) {
            (
                Some(indexed_cpu_metadata_buffer),
                Some(indexed_gpu_metadata_buffer),
                Some(indexed_work_item_gpu_buffer),
                Some(late_indexed_work_item_gpu_buffer),
                Some(late_indexed_indirect_parameters_buffer),
            ) => {
                // Don't use `as_entire_binding()` here; the shader reads the array
                // length and the underlying buffer may be longer than the actual size
                // of the vector.
                let indexed_work_item_buffer_size = NonZero::<u64>::try_from(
                    indexed_work_item_buffer.len() as u64
                        * u64::from(PreprocessWorkItem::min_size()),
                )
                .ok();

                Some(
                    self.render_device.create_bind_group(
                        "preprocess_early_indexed_gpu_occlusion_culling_bind_group",
                        &self
                            .pipelines
                            .early_gpu_occlusion_culling_preprocess
                            .bind_group_layout,
                        &BindGroupEntries::with_indices((
                            (3, self.current_input_buffer.as_entire_binding()),
                            (4, self.previous_input_buffer.as_entire_binding()),
                            // The early work items, trimmed to their used length.
                            (
                                5,
                                BindingResource::Buffer(BufferBinding {
                                    buffer: indexed_work_item_gpu_buffer,
                                    offset: 0,
                                    size: indexed_work_item_buffer_size,
                                }),
                            ),
                            (6, self.data_buffer.as_entire_binding()),
                            (7, indexed_cpu_metadata_buffer.as_entire_binding()),
                            (8, indexed_gpu_metadata_buffer.as_entire_binding()),
                            (9, mesh_culling_data_buffer.as_entire_binding()),
                            (0, view_uniforms_binding.clone()),
                            // The view's depth pyramid texture.
                            (10, &view_depth_pyramid.all_mips),
                            // The previous view data, sliced out of the previous
                            // view uniforms buffer at this view's offset.
                            (
                                2,
                                BufferBinding {
                                    buffer: previous_view_buffer,
                                    offset: previous_view_uniform_offset.offset as u64,
                                    size: NonZeroU64::new(size_of::<PreviousViewData>() as u64),
                                },
                            ),
                            // The late work item buffer.
                            //
                            // NOTE(review): this is bound with the *early* work
                            // item buffer's size; presumably the late buffer is
                            // allocated with at least the same capacity —
                            // confirm against the buffer allocation code.
                            (
                                11,
                                BufferBinding {
                                    buffer: late_indexed_work_item_gpu_buffer,
                                    offset: 0,
                                    size: indexed_work_item_buffer_size,
                                },
                            ),
                            // The indirect dispatch parameters for the late
                            // preprocessing pass.
                            (
                                12,
                                BufferBinding {
                                    buffer: late_indexed_indirect_parameters_buffer,
                                    offset: 0,
                                    size: NonZeroU64::new(
                                        late_indexed_indirect_parameters_buffer.size(),
                                    ),
                                },
                            ),
                        )),
                    ),
                )
            }
            _ => None,
        }
    }
2102
2103    /// Creates the bind group for the first phase of mesh preprocessing of
2104    /// non-indexed meshes when GPU occlusion culling is enabled.
2105    fn create_indirect_occlusion_culling_early_non_indexed_bind_group(
2106        &self,
2107        view_depth_pyramid: &ViewDepthPyramid,
2108        previous_view_uniform_offset: &PreviousViewUniformOffset,
2109        non_indexed_work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
2110        late_non_indexed_work_item_buffer: &UninitBufferVec<PreprocessWorkItem>,
2111    ) -> Option<BindGroup> {
2112        let mesh_culling_data_buffer = self.mesh_culling_data_buffer.buffer()?;
2113        let view_uniforms_binding = self.view_uniforms.uniforms.binding()?;
2114        let previous_view_buffer = self.previous_view_uniforms.uniforms.buffer()?;
2115
2116        match (
2117            self.phase_indirect_parameters_buffers
2118                .non_indexed
2119                .cpu_metadata_buffer(),
2120            self.phase_indirect_parameters_buffers
2121                .non_indexed
2122                .gpu_metadata_buffer(),
2123            non_indexed_work_item_buffer.buffer(),
2124            late_non_indexed_work_item_buffer.buffer(),
2125            self.late_non_indexed_indirect_parameters_buffer.buffer(),
2126        ) {
2127            (
2128                Some(non_indexed_cpu_metadata_buffer),
2129                Some(non_indexed_gpu_metadata_buffer),
2130                Some(non_indexed_work_item_gpu_buffer),
2131                Some(late_non_indexed_work_item_buffer),
2132                Some(late_non_indexed_indirect_parameters_buffer),
2133            ) => {
2134                // Don't use `as_entire_binding()` here; the shader reads the array
2135                // length and the underlying buffer may be longer than the actual size
2136                // of the vector.
2137                let non_indexed_work_item_buffer_size = NonZero::<u64>::try_from(
2138                    non_indexed_work_item_buffer.len() as u64
2139                        * u64::from(PreprocessWorkItem::min_size()),
2140                )
2141                .ok();
2142
2143                Some(
2144                    self.render_device.create_bind_group(
2145                        "preprocess_early_non_indexed_gpu_occlusion_culling_bind_group",
2146                        &self
2147                            .pipelines
2148                            .early_gpu_occlusion_culling_preprocess
2149                            .bind_group_layout,
2150                        &BindGroupEntries::with_indices((
2151                            (3, self.current_input_buffer.as_entire_binding()),
2152                            (4, self.previous_input_buffer.as_entire_binding()),
2153                            (
2154                                5,
2155                                BindingResource::Buffer(BufferBinding {
2156                                    buffer: non_indexed_work_item_gpu_buffer,
2157                                    offset: 0,
2158                                    size: non_indexed_work_item_buffer_size,
2159                                }),
2160                            ),
2161                            (6, self.data_buffer.as_entire_binding()),
2162                            (7, non_indexed_cpu_metadata_buffer.as_entire_binding()),
2163                            (8, non_indexed_gpu_metadata_buffer.as_entire_binding()),
2164                            (9, mesh_culling_data_buffer.as_entire_binding()),
2165                            (0, view_uniforms_binding.clone()),
2166                            (10, &view_depth_pyramid.all_mips),
2167                            (
2168                                2,
2169                                BufferBinding {
2170                                    buffer: previous_view_buffer,
2171                                    offset: previous_view_uniform_offset.offset as u64,
2172                                    size: NonZeroU64::new(size_of::<PreviousViewData>() as u64),
2173                                },
2174                            ),
2175                            (
2176                                11,
2177                                BufferBinding {
2178                                    buffer: late_non_indexed_work_item_buffer,
2179                                    offset: 0,
2180                                    size: non_indexed_work_item_buffer_size,
2181                                },
2182                            ),
2183                            (
2184                                12,
2185                                BufferBinding {
2186                                    buffer: late_non_indexed_indirect_parameters_buffer,
2187                                    offset: 0,
2188                                    size: NonZeroU64::new(
2189                                        late_non_indexed_indirect_parameters_buffer.size(),
2190                                    ),
2191                                },
2192                            ),
2193                        )),
2194                    ),
2195                )
2196            }
2197            _ => None,
2198        }
2199    }
2200
2201    /// Creates the bind group for the second phase of mesh preprocessing of
2202    /// indexed meshes when GPU occlusion culling is enabled.
2203    fn create_indirect_occlusion_culling_late_indexed_bind_group(
2204        &self,
2205        view_depth_pyramid: &ViewDepthPyramid,
2206        previous_view_uniform_offset: &PreviousViewUniformOffset,
2207        late_indexed_work_item_buffer: &UninitBufferVec<PreprocessWorkItem>,
2208    ) -> Option<BindGroup> {
2209        let mesh_culling_data_buffer = self.mesh_culling_data_buffer.buffer()?;
2210        let view_uniforms_binding = self.view_uniforms.uniforms.binding()?;
2211        let previous_view_buffer = self.previous_view_uniforms.uniforms.buffer()?;
2212
2213        match (
2214            self.phase_indirect_parameters_buffers
2215                .indexed
2216                .cpu_metadata_buffer(),
2217            self.phase_indirect_parameters_buffers
2218                .indexed
2219                .gpu_metadata_buffer(),
2220            late_indexed_work_item_buffer.buffer(),
2221            self.late_indexed_indirect_parameters_buffer.buffer(),
2222        ) {
2223            (
2224                Some(indexed_cpu_metadata_buffer),
2225                Some(indexed_gpu_metadata_buffer),
2226                Some(late_indexed_work_item_gpu_buffer),
2227                Some(late_indexed_indirect_parameters_buffer),
2228            ) => {
2229                // Don't use `as_entire_binding()` here; the shader reads the array
2230                // length and the underlying buffer may be longer than the actual size
2231                // of the vector.
2232                let late_indexed_work_item_buffer_size = NonZero::<u64>::try_from(
2233                    late_indexed_work_item_buffer.len() as u64
2234                        * u64::from(PreprocessWorkItem::min_size()),
2235                )
2236                .ok();
2237
2238                Some(
2239                    self.render_device.create_bind_group(
2240                        "preprocess_late_indexed_gpu_occlusion_culling_bind_group",
2241                        &self
2242                            .pipelines
2243                            .late_gpu_occlusion_culling_preprocess
2244                            .bind_group_layout,
2245                        &BindGroupEntries::with_indices((
2246                            (3, self.current_input_buffer.as_entire_binding()),
2247                            (4, self.previous_input_buffer.as_entire_binding()),
2248                            (
2249                                5,
2250                                BindingResource::Buffer(BufferBinding {
2251                                    buffer: late_indexed_work_item_gpu_buffer,
2252                                    offset: 0,
2253                                    size: late_indexed_work_item_buffer_size,
2254                                }),
2255                            ),
2256                            (6, self.data_buffer.as_entire_binding()),
2257                            (7, indexed_cpu_metadata_buffer.as_entire_binding()),
2258                            (8, indexed_gpu_metadata_buffer.as_entire_binding()),
2259                            (9, mesh_culling_data_buffer.as_entire_binding()),
2260                            (0, view_uniforms_binding.clone()),
2261                            (10, &view_depth_pyramid.all_mips),
2262                            (
2263                                2,
2264                                BufferBinding {
2265                                    buffer: previous_view_buffer,
2266                                    offset: previous_view_uniform_offset.offset as u64,
2267                                    size: NonZeroU64::new(size_of::<PreviousViewData>() as u64),
2268                                },
2269                            ),
2270                            (
2271                                12,
2272                                BufferBinding {
2273                                    buffer: late_indexed_indirect_parameters_buffer,
2274                                    offset: 0,
2275                                    size: NonZeroU64::new(
2276                                        late_indexed_indirect_parameters_buffer.size(),
2277                                    ),
2278                                },
2279                            ),
2280                        )),
2281                    ),
2282                )
2283            }
2284            _ => None,
2285        }
2286    }
2287
2288    /// Creates the bind group for the second phase of mesh preprocessing of
2289    /// non-indexed meshes when GPU occlusion culling is enabled.
2290    fn create_indirect_occlusion_culling_late_non_indexed_bind_group(
2291        &self,
2292        view_depth_pyramid: &ViewDepthPyramid,
2293        previous_view_uniform_offset: &PreviousViewUniformOffset,
2294        late_non_indexed_work_item_buffer: &UninitBufferVec<PreprocessWorkItem>,
2295    ) -> Option<BindGroup> {
2296        let mesh_culling_data_buffer = self.mesh_culling_data_buffer.buffer()?;
2297        let view_uniforms_binding = self.view_uniforms.uniforms.binding()?;
2298        let previous_view_buffer = self.previous_view_uniforms.uniforms.buffer()?;
2299
2300        match (
2301            self.phase_indirect_parameters_buffers
2302                .non_indexed
2303                .cpu_metadata_buffer(),
2304            self.phase_indirect_parameters_buffers
2305                .non_indexed
2306                .gpu_metadata_buffer(),
2307            late_non_indexed_work_item_buffer.buffer(),
2308            self.late_non_indexed_indirect_parameters_buffer.buffer(),
2309        ) {
2310            (
2311                Some(non_indexed_cpu_metadata_buffer),
2312                Some(non_indexed_gpu_metadata_buffer),
2313                Some(non_indexed_work_item_gpu_buffer),
2314                Some(late_non_indexed_indirect_parameters_buffer),
2315            ) => {
2316                // Don't use `as_entire_binding()` here; the shader reads the array
2317                // length and the underlying buffer may be longer than the actual size
2318                // of the vector.
2319                let non_indexed_work_item_buffer_size = NonZero::<u64>::try_from(
2320                    late_non_indexed_work_item_buffer.len() as u64
2321                        * u64::from(PreprocessWorkItem::min_size()),
2322                )
2323                .ok();
2324
2325                Some(
2326                    self.render_device.create_bind_group(
2327                        "preprocess_late_non_indexed_gpu_occlusion_culling_bind_group",
2328                        &self
2329                            .pipelines
2330                            .late_gpu_occlusion_culling_preprocess
2331                            .bind_group_layout,
2332                        &BindGroupEntries::with_indices((
2333                            (3, self.current_input_buffer.as_entire_binding()),
2334                            (4, self.previous_input_buffer.as_entire_binding()),
2335                            (
2336                                5,
2337                                BindingResource::Buffer(BufferBinding {
2338                                    buffer: non_indexed_work_item_gpu_buffer,
2339                                    offset: 0,
2340                                    size: non_indexed_work_item_buffer_size,
2341                                }),
2342                            ),
2343                            (6, self.data_buffer.as_entire_binding()),
2344                            (7, non_indexed_cpu_metadata_buffer.as_entire_binding()),
2345                            (8, non_indexed_gpu_metadata_buffer.as_entire_binding()),
2346                            (9, mesh_culling_data_buffer.as_entire_binding()),
2347                            (0, view_uniforms_binding.clone()),
2348                            (10, &view_depth_pyramid.all_mips),
2349                            (
2350                                2,
2351                                BufferBinding {
2352                                    buffer: previous_view_buffer,
2353                                    offset: previous_view_uniform_offset.offset as u64,
2354                                    size: NonZeroU64::new(size_of::<PreviousViewData>() as u64),
2355                                },
2356                            ),
2357                            (
2358                                12,
2359                                BufferBinding {
2360                                    buffer: late_non_indexed_indirect_parameters_buffer,
2361                                    offset: 0,
2362                                    size: NonZeroU64::new(
2363                                        late_non_indexed_indirect_parameters_buffer.size(),
2364                                    ),
2365                                },
2366                            ),
2367                        )),
2368                    ),
2369                )
2370            }
2371            _ => None,
2372        }
2373    }
2374
2375    /// Creates the bind groups for mesh preprocessing when GPU frustum culling
2376    /// is enabled, but GPU occlusion culling is disabled.
2377    fn create_indirect_frustum_culling_preprocess_bind_groups(
2378        &self,
2379        indexed_work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
2380        non_indexed_work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
2381    ) -> Option<PhasePreprocessBindGroups> {
2382        Some(PhasePreprocessBindGroups::IndirectFrustumCulling {
2383            indexed: self
2384                .create_indirect_frustum_culling_indexed_bind_group(indexed_work_item_buffer),
2385            non_indexed: self.create_indirect_frustum_culling_non_indexed_bind_group(
2386                non_indexed_work_item_buffer,
2387            ),
2388        })
2389    }
2390
2391    /// Creates the bind group for mesh preprocessing of indexed meshes when GPU
2392    /// frustum culling is enabled, but GPU occlusion culling is disabled.
2393    fn create_indirect_frustum_culling_indexed_bind_group(
2394        &self,
2395        indexed_work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
2396    ) -> Option<BindGroup> {
2397        let mesh_culling_data_buffer = self.mesh_culling_data_buffer.buffer()?;
2398        let view_uniforms_binding = self.view_uniforms.uniforms.binding()?;
2399
2400        match (
2401            self.phase_indirect_parameters_buffers
2402                .indexed
2403                .cpu_metadata_buffer(),
2404            self.phase_indirect_parameters_buffers
2405                .indexed
2406                .gpu_metadata_buffer(),
2407            indexed_work_item_buffer.buffer(),
2408        ) {
2409            (
2410                Some(indexed_cpu_metadata_buffer),
2411                Some(indexed_gpu_metadata_buffer),
2412                Some(indexed_work_item_gpu_buffer),
2413            ) => {
2414                // Don't use `as_entire_binding()` here; the shader reads the array
2415                // length and the underlying buffer may be longer than the actual size
2416                // of the vector.
2417                let indexed_work_item_buffer_size = NonZero::<u64>::try_from(
2418                    indexed_work_item_buffer.len() as u64
2419                        * u64::from(PreprocessWorkItem::min_size()),
2420                )
2421                .ok();
2422
2423                Some(
2424                    self.render_device.create_bind_group(
2425                        "preprocess_gpu_indexed_frustum_culling_bind_group",
2426                        &self
2427                            .pipelines
2428                            .gpu_frustum_culling_preprocess
2429                            .bind_group_layout,
2430                        &BindGroupEntries::with_indices((
2431                            (3, self.current_input_buffer.as_entire_binding()),
2432                            (4, self.previous_input_buffer.as_entire_binding()),
2433                            (
2434                                5,
2435                                BindingResource::Buffer(BufferBinding {
2436                                    buffer: indexed_work_item_gpu_buffer,
2437                                    offset: 0,
2438                                    size: indexed_work_item_buffer_size,
2439                                }),
2440                            ),
2441                            (6, self.data_buffer.as_entire_binding()),
2442                            (7, indexed_cpu_metadata_buffer.as_entire_binding()),
2443                            (8, indexed_gpu_metadata_buffer.as_entire_binding()),
2444                            (9, mesh_culling_data_buffer.as_entire_binding()),
2445                            (0, view_uniforms_binding.clone()),
2446                        )),
2447                    ),
2448                )
2449            }
2450            _ => None,
2451        }
2452    }
2453
2454    /// Creates the bind group for mesh preprocessing of non-indexed meshes when
2455    /// GPU frustum culling is enabled, but GPU occlusion culling is disabled.
2456    fn create_indirect_frustum_culling_non_indexed_bind_group(
2457        &self,
2458        non_indexed_work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
2459    ) -> Option<BindGroup> {
2460        let mesh_culling_data_buffer = self.mesh_culling_data_buffer.buffer()?;
2461        let view_uniforms_binding = self.view_uniforms.uniforms.binding()?;
2462
2463        match (
2464            self.phase_indirect_parameters_buffers
2465                .non_indexed
2466                .cpu_metadata_buffer(),
2467            self.phase_indirect_parameters_buffers
2468                .non_indexed
2469                .gpu_metadata_buffer(),
2470            non_indexed_work_item_buffer.buffer(),
2471        ) {
2472            (
2473                Some(non_indexed_cpu_metadata_buffer),
2474                Some(non_indexed_gpu_metadata_buffer),
2475                Some(non_indexed_work_item_gpu_buffer),
2476            ) => {
2477                // Don't use `as_entire_binding()` here; the shader reads the array
2478                // length and the underlying buffer may be longer than the actual size
2479                // of the vector.
2480                let non_indexed_work_item_buffer_size = NonZero::<u64>::try_from(
2481                    non_indexed_work_item_buffer.len() as u64
2482                        * u64::from(PreprocessWorkItem::min_size()),
2483                )
2484                .ok();
2485
2486                Some(
2487                    self.render_device.create_bind_group(
2488                        "preprocess_gpu_non_indexed_frustum_culling_bind_group",
2489                        &self
2490                            .pipelines
2491                            .gpu_frustum_culling_preprocess
2492                            .bind_group_layout,
2493                        &BindGroupEntries::with_indices((
2494                            (3, self.current_input_buffer.as_entire_binding()),
2495                            (4, self.previous_input_buffer.as_entire_binding()),
2496                            (
2497                                5,
2498                                BindingResource::Buffer(BufferBinding {
2499                                    buffer: non_indexed_work_item_gpu_buffer,
2500                                    offset: 0,
2501                                    size: non_indexed_work_item_buffer_size,
2502                                }),
2503                            ),
2504                            (6, self.data_buffer.as_entire_binding()),
2505                            (7, non_indexed_cpu_metadata_buffer.as_entire_binding()),
2506                            (8, non_indexed_gpu_metadata_buffer.as_entire_binding()),
2507                            (9, mesh_culling_data_buffer.as_entire_binding()),
2508                            (0, view_uniforms_binding.clone()),
2509                        )),
2510                    ),
2511                )
2512            }
2513            _ => None,
2514        }
2515    }
2516}
2517
2518/// A system that creates bind groups from the indirect parameters metadata and
2519/// data buffers for the indirect batch set reset shader and the indirect
2520/// parameter building shader.
2521fn create_build_indirect_parameters_bind_groups(
2522    commands: &mut Commands,
2523    render_device: &RenderDevice,
2524    pipelines: &PreprocessPipelines,
2525    current_input_buffer: &Buffer,
2526    indirect_parameters_buffers: &IndirectParametersBuffers,
2527) {
2528    let mut build_indirect_parameters_bind_groups = BuildIndirectParametersBindGroups::new();
2529
2530    for (phase_type_id, phase_indirect_parameters_buffer) in indirect_parameters_buffers.iter() {
2531        build_indirect_parameters_bind_groups.insert(
2532            *phase_type_id,
2533            PhaseBuildIndirectParametersBindGroups {
2534                reset_indexed_indirect_batch_sets: match (phase_indirect_parameters_buffer
2535                    .indexed
2536                    .batch_sets_buffer(),)
2537                {
2538                    (Some(indexed_batch_sets_buffer),) => Some(
2539                        render_device.create_bind_group(
2540                            "reset_indexed_indirect_batch_sets_bind_group",
2541                            // The early bind group is good for the main phase and late
2542                            // phase too. They bind the same buffers.
2543                            &pipelines
2544                                .early_phase
2545                                .reset_indirect_batch_sets
2546                                .bind_group_layout,
2547                            &BindGroupEntries::sequential((
2548                                indexed_batch_sets_buffer.as_entire_binding(),
2549                            )),
2550                        ),
2551                    ),
2552                    _ => None,
2553                },
2554
2555                reset_non_indexed_indirect_batch_sets: match (phase_indirect_parameters_buffer
2556                    .non_indexed
2557                    .batch_sets_buffer(),)
2558                {
2559                    (Some(non_indexed_batch_sets_buffer),) => Some(
2560                        render_device.create_bind_group(
2561                            "reset_non_indexed_indirect_batch_sets_bind_group",
2562                            // The early bind group is good for the main phase and late
2563                            // phase too. They bind the same buffers.
2564                            &pipelines
2565                                .early_phase
2566                                .reset_indirect_batch_sets
2567                                .bind_group_layout,
2568                            &BindGroupEntries::sequential((
2569                                non_indexed_batch_sets_buffer.as_entire_binding(),
2570                            )),
2571                        ),
2572                    ),
2573                    _ => None,
2574                },
2575
2576                build_indexed_indirect: match (
2577                    phase_indirect_parameters_buffer
2578                        .indexed
2579                        .cpu_metadata_buffer(),
2580                    phase_indirect_parameters_buffer
2581                        .indexed
2582                        .gpu_metadata_buffer(),
2583                    phase_indirect_parameters_buffer.indexed.data_buffer(),
2584                    phase_indirect_parameters_buffer.indexed.batch_sets_buffer(),
2585                ) {
2586                    (
2587                        Some(indexed_indirect_parameters_cpu_metadata_buffer),
2588                        Some(indexed_indirect_parameters_gpu_metadata_buffer),
2589                        Some(indexed_indirect_parameters_data_buffer),
2590                        Some(indexed_batch_sets_buffer),
2591                    ) => Some(
2592                        render_device.create_bind_group(
2593                            "build_indexed_indirect_parameters_bind_group",
2594                            // The frustum culling bind group is good for occlusion culling
2595                            // too. They bind the same buffers.
2596                            &pipelines
2597                                .gpu_frustum_culling_build_indexed_indirect_params
2598                                .bind_group_layout,
2599                            &BindGroupEntries::sequential((
2600                                current_input_buffer.as_entire_binding(),
2601                                // Don't use `as_entire_binding` here; the shader reads
2602                                // the length and `RawBufferVec` overallocates.
2603                                BufferBinding {
2604                                    buffer: indexed_indirect_parameters_cpu_metadata_buffer,
2605                                    offset: 0,
2606                                    size: NonZeroU64::new(
2607                                        phase_indirect_parameters_buffer.indexed.batch_count()
2608                                            as u64
2609                                            * size_of::<IndirectParametersCpuMetadata>() as u64,
2610                                    ),
2611                                },
2612                                BufferBinding {
2613                                    buffer: indexed_indirect_parameters_gpu_metadata_buffer,
2614                                    offset: 0,
2615                                    size: NonZeroU64::new(
2616                                        phase_indirect_parameters_buffer.indexed.batch_count()
2617                                            as u64
2618                                            * size_of::<IndirectParametersGpuMetadata>() as u64,
2619                                    ),
2620                                },
2621                                indexed_batch_sets_buffer.as_entire_binding(),
2622                                indexed_indirect_parameters_data_buffer.as_entire_binding(),
2623                            )),
2624                        ),
2625                    ),
2626                    _ => None,
2627                },
2628
2629                build_non_indexed_indirect: match (
2630                    phase_indirect_parameters_buffer
2631                        .non_indexed
2632                        .cpu_metadata_buffer(),
2633                    phase_indirect_parameters_buffer
2634                        .non_indexed
2635                        .gpu_metadata_buffer(),
2636                    phase_indirect_parameters_buffer.non_indexed.data_buffer(),
2637                    phase_indirect_parameters_buffer
2638                        .non_indexed
2639                        .batch_sets_buffer(),
2640                ) {
2641                    (
2642                        Some(non_indexed_indirect_parameters_cpu_metadata_buffer),
2643                        Some(non_indexed_indirect_parameters_gpu_metadata_buffer),
2644                        Some(non_indexed_indirect_parameters_data_buffer),
2645                        Some(non_indexed_batch_sets_buffer),
2646                    ) => Some(
2647                        render_device.create_bind_group(
2648                            "build_non_indexed_indirect_parameters_bind_group",
2649                            // The frustum culling bind group is good for occlusion culling
2650                            // too. They bind the same buffers.
2651                            &pipelines
2652                                .gpu_frustum_culling_build_non_indexed_indirect_params
2653                                .bind_group_layout,
2654                            &BindGroupEntries::sequential((
2655                                current_input_buffer.as_entire_binding(),
2656                                // Don't use `as_entire_binding` here; the shader reads
2657                                // the length and `RawBufferVec` overallocates.
2658                                BufferBinding {
2659                                    buffer: non_indexed_indirect_parameters_cpu_metadata_buffer,
2660                                    offset: 0,
2661                                    size: NonZeroU64::new(
2662                                        phase_indirect_parameters_buffer.non_indexed.batch_count()
2663                                            as u64
2664                                            * size_of::<IndirectParametersCpuMetadata>() as u64,
2665                                    ),
2666                                },
2667                                BufferBinding {
2668                                    buffer: non_indexed_indirect_parameters_gpu_metadata_buffer,
2669                                    offset: 0,
2670                                    size: NonZeroU64::new(
2671                                        phase_indirect_parameters_buffer.non_indexed.batch_count()
2672                                            as u64
2673                                            * size_of::<IndirectParametersGpuMetadata>() as u64,
2674                                    ),
2675                                },
2676                                non_indexed_batch_sets_buffer.as_entire_binding(),
2677                                non_indexed_indirect_parameters_data_buffer.as_entire_binding(),
2678                            )),
2679                        ),
2680                    ),
2681                    _ => None,
2682                },
2683            },
2684        );
2685    }
2686
2687    commands.insert_resource(build_indirect_parameters_bind_groups);
2688}
2689
2690/// Writes the information needed to do GPU mesh culling to the GPU.
2691pub fn write_mesh_culling_data_buffer(
2692    render_device: Res<RenderDevice>,
2693    render_queue: Res<RenderQueue>,
2694    mut mesh_culling_data_buffer: ResMut<MeshCullingDataBuffer>,
2695) {
2696    mesh_culling_data_buffer.write_buffer(&render_device, &render_queue);
2697}