1use core::num::{NonZero, NonZeroU64};
10
11use bevy_app::{App, Plugin};
12use bevy_asset::{embedded_asset, load_embedded_asset, Handle};
13use bevy_core_pipeline::{
14 core_3d::graph::{Core3d, Node3d},
15 experimental::mip_generation::ViewDepthPyramid,
16 prepass::{DepthPrepass, PreviousViewData, PreviousViewUniformOffset, PreviousViewUniforms},
17};
18use bevy_derive::{Deref, DerefMut};
19use bevy_ecs::{
20 component::Component,
21 entity::Entity,
22 prelude::resource_exists,
23 query::{Has, Or, QueryState, With, Without},
24 resource::Resource,
25 schedule::IntoScheduleConfigs as _,
26 system::{lifetimeless::Read, Commands, Query, Res, ResMut},
27 world::{FromWorld, World},
28};
29use bevy_render::{
30 batching::gpu_preprocessing::{
31 BatchedInstanceBuffers, GpuOcclusionCullingWorkItemBuffers, GpuPreprocessingMode,
32 GpuPreprocessingSupport, IndirectBatchSet, IndirectParametersBuffers,
33 IndirectParametersCpuMetadata, IndirectParametersGpuMetadata, IndirectParametersIndexed,
34 IndirectParametersNonIndexed, LatePreprocessWorkItemIndirectParameters, PreprocessWorkItem,
35 PreprocessWorkItemBuffers, UntypedPhaseBatchedInstanceBuffers,
36 UntypedPhaseIndirectParametersBuffers,
37 },
38 diagnostic::RecordDiagnostics,
39 experimental::occlusion_culling::OcclusionCulling,
40 render_graph::{Node, NodeRunError, RenderGraphContext, RenderGraphExt},
41 render_resource::{
42 binding_types::{storage_buffer, storage_buffer_read_only, texture_2d, uniform_buffer},
43 BindGroup, BindGroupEntries, BindGroupLayout, BindingResource, Buffer, BufferBinding,
44 CachedComputePipelineId, ComputePassDescriptor, ComputePipelineDescriptor,
45 DynamicBindGroupLayoutEntries, PipelineCache, PushConstantRange, RawBufferVec,
46 ShaderStages, ShaderType, SpecializedComputePipeline, SpecializedComputePipelines,
47 TextureSampleType, UninitBufferVec,
48 },
49 renderer::{RenderContext, RenderDevice, RenderQueue},
50 settings::WgpuFeatures,
51 view::{ExtractedView, NoIndirectDrawing, ViewUniform, ViewUniformOffset, ViewUniforms},
52 Render, RenderApp, RenderSystems,
53};
54use bevy_shader::Shader;
55use bevy_utils::{default, TypeIdMap};
56use bitflags::bitflags;
57use smallvec::{smallvec, SmallVec};
58use tracing::warn;
59
60use crate::{
61 graph::NodePbr, MeshCullingData, MeshCullingDataBuffer, MeshInputUniform, MeshUniform,
62};
63
64use super::{ShadowView, ViewLightEntities};
65
/// The number of threads per workgroup used when dispatching the mesh
/// preprocessing and indirect-parameter building compute shaders (dispatch
/// counts below are computed as `len().div_ceil(WORKGROUP_SIZE)`).
const WORKGROUP_SIZE: usize = 64;
68
/// A plugin that builds mesh uniforms on the GPU, using compute shaders to
/// perform preprocessing, frustum culling, and occlusion culling.
pub struct GpuMeshPreprocessPlugin {
    /// Whether mesh instance buffers are built on the GPU.
    ///
    /// If this is false, or if the platform doesn't support GPU preprocessing
    /// (see [`GpuPreprocessingSupport`]), this plugin registers nothing.
    pub use_gpu_instance_buffer_builder: bool,
}
80
/// The render graph node that clears out the GPU-side indirect parameters
/// metadata buffers at the start of each frame.
#[derive(Default)]
pub struct ClearIndirectParametersMetadataNode;
86
/// The render graph node that runs the early mesh preprocessing compute pass:
/// transforming meshes and, depending on the view, performing frustum and/or
/// early occlusion culling.
pub struct EarlyGpuPreprocessNode {
    // Views to preprocess. Bind groups and uniform offsets are optional here
    // because the early pass still runs direct preprocessing for views that
    // lack them set up yet (missing ones are skipped per-view in `run`).
    view_query: QueryState<
        (
            Read<ExtractedView>,
            Option<Read<PreprocessBindGroups>>,
            Option<Read<ViewUniformOffset>>,
            Has<NoIndirectDrawing>,
            Has<OcclusionCulling>,
        ),
        Without<SkipGpuPreprocess>,
    >,
    // Used to fetch the light views attached to the main view so shadow views
    // are processed in the same pass.
    main_view_query: QueryState<Read<ViewLightEntities>>,
}
107
/// The render graph node that runs the late mesh preprocessing compute pass.
///
/// Only views that use indirect drawing with occlusion culling and have a
/// depth prepass participate in this pass.
pub struct LateGpuPreprocessNode {
    view_query: QueryState<
        (
            Read<ExtractedView>,
            Read<PreprocessBindGroups>,
            Read<ViewUniformOffset>,
        ),
        (
            Without<SkipGpuPreprocess>,
            Without<NoIndirectDrawing>,
            With<OcclusionCulling>,
            With<DepthPrepass>,
        ),
    >,
}
130
/// The render graph node that builds indirect draw parameters consumed by the
/// early prepass.
///
/// Matches views that draw indirectly and have either a depth prepass or are
/// shadow views.
pub struct EarlyPrepassBuildIndirectParametersNode {
    view_query: QueryState<
        Read<PreprocessBindGroups>,
        (
            Without<SkipGpuPreprocess>,
            Without<NoIndirectDrawing>,
            Or<(With<DepthPrepass>, With<ShadowView>)>,
        ),
    >,
}
148
/// The render graph node that builds indirect draw parameters consumed by the
/// late prepass.
///
/// Same criteria as [`EarlyPrepassBuildIndirectParametersNode`], plus the view
/// must use occlusion culling.
pub struct LatePrepassBuildIndirectParametersNode {
    view_query: QueryState<
        Read<PreprocessBindGroups>,
        (
            Without<SkipGpuPreprocess>,
            Without<NoIndirectDrawing>,
            Or<(With<DepthPrepass>, With<ShadowView>)>,
            With<OcclusionCulling>,
        ),
    >,
}
168
/// The render graph node that builds indirect draw parameters consumed by the
/// main render pass, for every indirectly-drawn view.
pub struct MainBuildIndirectParametersNode {
    view_query: QueryState<
        Read<PreprocessBindGroups>,
        (Without<SkipGpuPreprocess>, Without<NoIndirectDrawing>),
    >,
}
183
/// All compute pipelines used for GPU mesh preprocessing and
/// indirect-parameter building.
#[derive(Resource)]
pub struct PreprocessPipelines {
    /// Preprocessing with no culling (direct drawing).
    pub direct_preprocess: PreprocessPipeline,
    /// Preprocessing with GPU frustum culling only.
    pub gpu_frustum_culling_preprocess: PreprocessPipeline,
    /// The early phase of two-phase GPU occlusion culling.
    pub early_gpu_occlusion_culling_preprocess: PreprocessPipeline,
    /// The late phase of two-phase GPU occlusion culling.
    pub late_gpu_occlusion_culling_preprocess: PreprocessPipeline,
    /// Builds indirect parameters for indexed meshes when only frustum
    /// culling is in use.
    pub gpu_frustum_culling_build_indexed_indirect_params: BuildIndirectParametersPipeline,
    /// Builds indirect parameters for non-indexed meshes when only frustum
    /// culling is in use.
    pub gpu_frustum_culling_build_non_indexed_indirect_params: BuildIndirectParametersPipeline,
    /// Occlusion-culling pipelines for the early phase.
    pub early_phase: PreprocessPhasePipelines,
    /// Occlusion-culling pipelines for the late phase.
    pub late_phase: PreprocessPhasePipelines,
    /// Occlusion-culling pipelines for the main phase.
    pub main_phase: PreprocessPhasePipelines,
}
223
/// The pipelines needed to build indirect draw parameters for one phase
/// (early, late, or main) when occlusion culling is in use.
#[derive(Clone)]
pub struct PreprocessPhasePipelines {
    /// Resets the per-batch-set counters before parameters are rebuilt.
    pub reset_indirect_batch_sets: ResetIndirectBatchSetsPipeline,
    /// Builds indirect parameters for indexed meshes.
    pub gpu_occlusion_culling_build_indexed_indirect_params: BuildIndirectParametersPipeline,
    /// Builds indirect parameters for non-indexed meshes.
    pub gpu_occlusion_culling_build_non_indexed_indirect_params: BuildIndirectParametersPipeline,
}
243
/// One specialization of the mesh preprocessing compute pipeline.
pub struct PreprocessPipeline {
    /// The layout of bind group 0 for this pipeline variant.
    pub bind_group_layout: BindGroupLayout,
    /// The `mesh_preprocess.wgsl` shader.
    pub shader: Handle<Shader>,
    /// The cached pipeline ID, once specialization has been queued.
    pub pipeline_id: Option<CachedComputePipelineId>,
}
255
/// The compute pipeline that resets indirect batch set counters
/// (`reset_indirect_batch_sets.wgsl`).
#[derive(Clone)]
pub struct ResetIndirectBatchSetsPipeline {
    /// The layout of bind group 0 for this pipeline.
    pub bind_group_layout: BindGroupLayout,
    /// The `reset_indirect_batch_sets.wgsl` shader.
    pub shader: Handle<Shader>,
    /// The cached pipeline ID, once specialization has been queued.
    pub pipeline_id: Option<CachedComputePipelineId>,
}
271
/// One specialization of the compute pipeline that builds indirect draw
/// parameters (`build_indirect_params.wgsl`).
#[derive(Clone)]
pub struct BuildIndirectParametersPipeline {
    /// The layout of bind group 0 for this pipeline variant.
    pub bind_group_layout: BindGroupLayout,
    /// The `build_indirect_params.wgsl` shader.
    pub shader: Handle<Shader>,
    /// The cached pipeline ID, once specialization has been queued.
    pub pipeline_id: Option<CachedComputePipelineId>,
}
284
bitflags! {
    /// Specialization flags for the mesh preprocessing compute pipeline.
    #[derive(Clone, Copy, PartialEq, Eq, Hash)]
    pub struct PreprocessPipelineKey: u8 {
        /// Whether GPU frustum culling is in use.
        const FRUSTUM_CULLING = 1;
        /// Whether GPU two-phase occlusion culling is in use.
        const OCCLUSION_CULLING = 2;
        /// Whether this is the early phase of occlusion culling (if unset
        /// while `OCCLUSION_CULLING` is set, it's the late phase).
        const EARLY_PHASE = 4;
    }

    /// Specialization flags for the indirect-parameter building pipeline.
    #[derive(Clone, Copy, PartialEq, Eq, Hash)]
    pub struct BuildIndirectParametersPipelineKey: u8 {
        /// Whether parameters are built for indexed (as opposed to
        /// non-indexed) meshes.
        const INDEXED = 1;
        /// Whether the `multi_draw_indirect_count` feature is available
        /// (named after [`WgpuFeatures`]).
        const MULTI_DRAW_INDIRECT_COUNT_SUPPORTED = 2;
        /// Whether GPU occlusion culling is in use.
        const OCCLUSION_CULLING = 4;
        /// Whether parameters are built for the early phase.
        const EARLY_PHASE = 8;
        /// Whether parameters are built for the late phase.
        const LATE_PHASE = 16;
        /// Whether parameters are built for the main phase.
        const MAIN_PHASE = 32;
    }
}
335
/// The per-view bind groups used by the mesh preprocessing passes, keyed by
/// the `TypeId` of the render phase they belong to.
#[derive(Component, Clone, Deref, DerefMut)]
pub struct PreprocessBindGroups(pub TypeIdMap<PhasePreprocessBindGroups>);
344
/// The mesh preprocessing bind groups for a single render phase of a view,
/// with one variant per drawing/culling mode.
#[derive(Clone)]
pub enum PhasePreprocessBindGroups {
    /// The single bind group used when drawing directly (no indirect
    /// drawing).
    Direct(BindGroup),

    /// The bind groups used with indirect drawing and GPU frustum culling
    /// (no occlusion culling).
    IndirectFrustumCulling {
        /// The bind group for batches of indexed meshes, if any exist.
        indexed: Option<BindGroup>,
        /// The bind group for batches of non-indexed meshes, if any exist.
        non_indexed: Option<BindGroup>,
    },

    /// The bind groups used with indirect drawing and two-phase GPU
    /// occlusion culling, split by phase and indexedness.
    IndirectOcclusionCulling {
        /// Early-phase bind group for indexed meshes, if any exist.
        early_indexed: Option<BindGroup>,
        /// Early-phase bind group for non-indexed meshes, if any exist.
        early_non_indexed: Option<BindGroup>,
        /// Late-phase bind group for indexed meshes, if any exist.
        late_indexed: Option<BindGroup>,
        /// Late-phase bind group for non-indexed meshes, if any exist.
        late_non_indexed: Option<BindGroup>,
    },
}
390
/// The bind groups for the indirect-parameter building passes, keyed by the
/// `TypeId` of the render phase they belong to.
#[derive(Resource, Default, Deref, DerefMut)]
pub struct BuildIndirectParametersBindGroups(pub TypeIdMap<PhaseBuildIndirectParametersBindGroups>);
398
399impl BuildIndirectParametersBindGroups {
400 pub fn new() -> BuildIndirectParametersBindGroups {
402 Self::default()
403 }
404}
405
/// The bind groups for the indirect-parameter building passes of a single
/// render phase. Each is `None` when that phase has no matching batches.
pub struct PhaseBuildIndirectParametersBindGroups {
    /// Bind group for resetting batch set counters of indexed meshes.
    reset_indexed_indirect_batch_sets: Option<BindGroup>,
    /// Bind group for resetting batch set counters of non-indexed meshes.
    reset_non_indexed_indirect_batch_sets: Option<BindGroup>,
    /// Bind group for building indirect parameters of indexed meshes.
    build_indexed_indirect: Option<BindGroup>,
    /// Bind group for building indirect parameters of non-indexed meshes.
    build_non_indexed_indirect: Option<BindGroup>,
}
422
/// A marker component: views with this component are excluded from all GPU
/// mesh preprocessing passes.
#[derive(Component, Default)]
pub struct SkipGpuPreprocess;
427
impl Plugin for GpuMeshPreprocessPlugin {
    fn build(&self, app: &mut App) {
        // Register the compute shaders used by the preprocessing and
        // indirect-parameter building passes.
        embedded_asset!(app, "mesh_preprocess.wgsl");
        embedded_asset!(app, "reset_indirect_batch_sets.wgsl");
        embedded_asset!(app, "build_indirect_params.wgsl");
    }

    fn finish(&self, app: &mut App) {
        let Some(render_app) = app.get_sub_app_mut(RenderApp) else {
            return;
        };

        // Bail out early if GPU instance buffer building is disabled or the
        // platform can't support GPU preprocessing at all.
        let gpu_preprocessing_support = render_app.world().resource::<GpuPreprocessingSupport>();
        if !self.use_gpu_instance_buffer_builder || !gpu_preprocessing_support.is_available() {
            return;
        }

        // Register resources, systems, the render graph nodes, and the edges
        // that order them relative to the prepasses and main pass.
        render_app
            .init_resource::<PreprocessPipelines>()
            .init_resource::<SpecializedComputePipelines<PreprocessPipeline>>()
            .init_resource::<SpecializedComputePipelines<ResetIndirectBatchSetsPipeline>>()
            .init_resource::<SpecializedComputePipelines<BuildIndirectParametersPipeline>>()
            .add_systems(
                Render,
                (
                    prepare_preprocess_pipelines.in_set(RenderSystems::Prepare),
                    prepare_preprocess_bind_groups
                        .run_if(resource_exists::<BatchedInstanceBuffers<
                            MeshUniform,
                            MeshInputUniform
                        >>)
                        .in_set(RenderSystems::PrepareBindGroups),
                    write_mesh_culling_data_buffer.in_set(RenderSystems::PrepareResourcesFlush),
                ),
            )
            .add_render_graph_node::<ClearIndirectParametersMetadataNode>(
                Core3d,
                NodePbr::ClearIndirectParametersMetadata
            )
            .add_render_graph_node::<EarlyGpuPreprocessNode>(Core3d, NodePbr::EarlyGpuPreprocess)
            .add_render_graph_node::<LateGpuPreprocessNode>(Core3d, NodePbr::LateGpuPreprocess)
            .add_render_graph_node::<EarlyPrepassBuildIndirectParametersNode>(
                Core3d,
                NodePbr::EarlyPrepassBuildIndirectParameters,
            )
            .add_render_graph_node::<LatePrepassBuildIndirectParametersNode>(
                Core3d,
                NodePbr::LatePrepassBuildIndirectParameters,
            )
            .add_render_graph_node::<MainBuildIndirectParametersNode>(
                Core3d,
                NodePbr::MainBuildIndirectParameters,
            )
            // Main ordering chain: clear metadata, early preprocess/prepass,
            // depth downsample, late preprocess/prepass, then the main pass.
            .add_render_graph_edges(
                Core3d,
                (
                    NodePbr::ClearIndirectParametersMetadata,
                    NodePbr::EarlyGpuPreprocess,
                    NodePbr::EarlyPrepassBuildIndirectParameters,
                    Node3d::EarlyPrepass,
                    Node3d::EarlyDeferredPrepass,
                    Node3d::EarlyDownsampleDepth,
                    NodePbr::LateGpuPreprocess,
                    NodePbr::LatePrepassBuildIndirectParameters,
                    Node3d::LatePrepass,
                    Node3d::LateDeferredPrepass,
                    NodePbr::MainBuildIndirectParameters,
                    Node3d::StartMainPass,
                ),
            ).add_render_graph_edges(
                Core3d,
                // Order the shadow passes relative to parameter building too.
                (
                    NodePbr::EarlyPrepassBuildIndirectParameters,
                    NodePbr::EarlyShadowPass,
                    Node3d::EarlyDownsampleDepth,
                )
            ).add_render_graph_edges(
                Core3d,
                (
                    NodePbr::LatePrepassBuildIndirectParameters,
                    NodePbr::LateShadowPass,
                    NodePbr::MainBuildIndirectParameters,
                )
            );
    }
}
516
impl Node for ClearIndirectParametersMetadataNode {
    fn run<'w>(
        &self,
        _: &mut RenderGraphContext,
        render_context: &mut RenderContext<'w>,
        world: &'w World,
    ) -> Result<(), NodeRunError> {
        let Some(indirect_parameters_buffers) = world.get_resource::<IndirectParametersBuffers>()
        else {
            return Ok(());
        };

        // Zero out each phase's GPU metadata buffers — both indexed and
        // non-indexed — so the preprocessing shaders start from a clean
        // slate this frame. Only the occupied prefix (batch_count entries)
        // is cleared.
        for phase_indirect_parameters_buffers in indirect_parameters_buffers.values() {
            if let Some(indexed_gpu_metadata_buffer) = phase_indirect_parameters_buffers
                .indexed
                .gpu_metadata_buffer()
            {
                render_context.command_encoder().clear_buffer(
                    indexed_gpu_metadata_buffer,
                    0,
                    Some(
                        phase_indirect_parameters_buffers.indexed.batch_count() as u64
                            * size_of::<IndirectParametersGpuMetadata>() as u64,
                    ),
                );
            }

            if let Some(non_indexed_gpu_metadata_buffer) = phase_indirect_parameters_buffers
                .non_indexed
                .gpu_metadata_buffer()
            {
                render_context.command_encoder().clear_buffer(
                    non_indexed_gpu_metadata_buffer,
                    0,
                    Some(
                        phase_indirect_parameters_buffers.non_indexed.batch_count() as u64
                            * size_of::<IndirectParametersGpuMetadata>() as u64,
                    ),
                );
            }
        }

        Ok(())
    }
}
563
564impl FromWorld for EarlyGpuPreprocessNode {
565 fn from_world(world: &mut World) -> Self {
566 Self {
567 view_query: QueryState::new(world),
568 main_view_query: QueryState::new(world),
569 }
570 }
571}
572
impl Node for EarlyGpuPreprocessNode {
    fn update(&mut self, world: &mut World) {
        // Refresh cached query state so that `get_manual` in `run` sees any
        // newly-created archetypes.
        self.view_query.update_archetypes(world);
        self.main_view_query.update_archetypes(world);
    }

    fn run<'w>(
        &self,
        graph: &mut RenderGraphContext,
        render_context: &mut RenderContext<'w>,
        world: &'w World,
    ) -> Result<(), NodeRunError> {
        let diagnostics = render_context.diagnostic_recorder();

        let batched_instance_buffers =
            world.resource::<BatchedInstanceBuffers<MeshUniform, MeshInputUniform>>();

        let pipeline_cache = world.resource::<PipelineCache>();
        let preprocess_pipelines = world.resource::<PreprocessPipelines>();

        let mut compute_pass =
            render_context
                .command_encoder()
                .begin_compute_pass(&ComputePassDescriptor {
                    label: Some("early_mesh_preprocessing"),
                    timestamp_writes: None,
                });
        let pass_span = diagnostics.pass_span(&mut compute_pass, "early_mesh_preprocessing");

        // Process the graph's view plus any light views attached to it, so
        // shadow views share this compute pass.
        let mut all_views: SmallVec<[_; 8]> = SmallVec::new();
        all_views.push(graph.view_entity());
        if let Ok(shadow_cascade_views) =
            self.main_view_query.get_manual(world, graph.view_entity())
        {
            all_views.extend(shadow_cascade_views.lights.iter().copied());
        }

        for view_entity in all_views {
            let Ok((
                view,
                bind_groups,
                view_uniform_offset,
                no_indirect_drawing,
                occlusion_culling,
            )) = self.view_query.get_manual(world, view_entity)
            else {
                continue;
            };

            // Views without prepared bind groups or uniform offsets are
            // silently skipped this frame.
            let Some(bind_groups) = bind_groups else {
                continue;
            };
            let Some(view_uniform_offset) = view_uniform_offset else {
                continue;
            };

            // Select the pipeline variant that matches this view's drawing
            // and culling mode.
            let maybe_pipeline_id = if no_indirect_drawing {
                preprocess_pipelines.direct_preprocess.pipeline_id
            } else if occlusion_culling {
                preprocess_pipelines
                    .early_gpu_occlusion_culling_preprocess
                    .pipeline_id
            } else {
                preprocess_pipelines
                    .gpu_frustum_culling_preprocess
                    .pipeline_id
            };

            let Some(preprocess_pipeline_id) = maybe_pipeline_id else {
                warn!("The build mesh uniforms pipeline wasn't ready");
                continue;
            };

            let Some(preprocess_pipeline) =
                pipeline_cache.get_compute_pipeline(preprocess_pipeline_id)
            else {
                // This happens while the pipeline is still compiling.
                continue;
            };

            compute_pass.set_pipeline(preprocess_pipeline);

            for (phase_type_id, batched_phase_instance_buffers) in
                &batched_instance_buffers.phase_instance_buffers
            {
                // Fetch this view's work item buffers for the phase, if any.
                let Some(work_item_buffers) = batched_phase_instance_buffers
                    .work_item_buffers
                    .get(&view.retained_view_entity)
                else {
                    continue;
                };

                let Some(phase_bind_groups) = bind_groups.get(phase_type_id) else {
                    continue;
                };

                // The only dynamic offset is the view uniform's.
                let dynamic_offsets = [view_uniform_offset.offset];

                match *phase_bind_groups {
                    PhasePreprocessBindGroups::Direct(ref bind_group) => {
                        // Direct drawing: a single dispatch over the direct
                        // work item buffer.
                        let PreprocessWorkItemBuffers::Direct(work_item_buffer) = work_item_buffers
                        else {
                            continue;
                        };
                        compute_pass.set_bind_group(0, bind_group, &dynamic_offsets);
                        let workgroup_count = work_item_buffer.len().div_ceil(WORKGROUP_SIZE);
                        if workgroup_count > 0 {
                            compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1);
                        }
                    }

                    // Both indirect variants dispatch separately for indexed
                    // and non-indexed meshes; for occlusion culling we use
                    // the early-phase bind groups here.
                    PhasePreprocessBindGroups::IndirectFrustumCulling {
                        indexed: ref maybe_indexed_bind_group,
                        non_indexed: ref maybe_non_indexed_bind_group,
                    }
                    | PhasePreprocessBindGroups::IndirectOcclusionCulling {
                        early_indexed: ref maybe_indexed_bind_group,
                        early_non_indexed: ref maybe_non_indexed_bind_group,
                        ..
                    } => {
                        let PreprocessWorkItemBuffers::Indirect {
                            indexed: indexed_buffer,
                            non_indexed: non_indexed_buffer,
                            ..
                        } = work_item_buffers
                        else {
                            continue;
                        };

                        if let Some(indexed_bind_group) = maybe_indexed_bind_group {
                            // With occlusion culling, pass the offset of the
                            // late indirect parameters as a push constant.
                            if let PreprocessWorkItemBuffers::Indirect {
                                gpu_occlusion_culling:
                                    Some(GpuOcclusionCullingWorkItemBuffers {
                                        late_indirect_parameters_indexed_offset,
                                        ..
                                    }),
                                ..
                            } = *work_item_buffers
                            {
                                compute_pass.set_push_constants(
                                    0,
                                    bytemuck::bytes_of(&late_indirect_parameters_indexed_offset),
                                );
                            }

                            compute_pass.set_bind_group(0, indexed_bind_group, &dynamic_offsets);
                            let workgroup_count = indexed_buffer.len().div_ceil(WORKGROUP_SIZE);
                            if workgroup_count > 0 {
                                compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1);
                            }
                        }

                        if let Some(non_indexed_bind_group) = maybe_non_indexed_bind_group {
                            // Same as above, for non-indexed meshes.
                            if let PreprocessWorkItemBuffers::Indirect {
                                gpu_occlusion_culling:
                                    Some(GpuOcclusionCullingWorkItemBuffers {
                                        late_indirect_parameters_non_indexed_offset,
                                        ..
                                    }),
                                ..
                            } = *work_item_buffers
                            {
                                compute_pass.set_push_constants(
                                    0,
                                    bytemuck::bytes_of(
                                        &late_indirect_parameters_non_indexed_offset,
                                    ),
                                );
                            }

                            compute_pass.set_bind_group(
                                0,
                                non_indexed_bind_group,
                                &dynamic_offsets,
                            );
                            let workgroup_count = non_indexed_buffer.len().div_ceil(WORKGROUP_SIZE);
                            if workgroup_count > 0 {
                                compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1);
                            }
                        }
                    }
                }
            }
        }

        pass_span.end(&mut compute_pass);

        Ok(())
    }
}
782
783impl FromWorld for EarlyPrepassBuildIndirectParametersNode {
784 fn from_world(world: &mut World) -> Self {
785 Self {
786 view_query: QueryState::new(world),
787 }
788 }
789}
790
791impl FromWorld for LatePrepassBuildIndirectParametersNode {
792 fn from_world(world: &mut World) -> Self {
793 Self {
794 view_query: QueryState::new(world),
795 }
796 }
797}
798
799impl FromWorld for MainBuildIndirectParametersNode {
800 fn from_world(world: &mut World) -> Self {
801 Self {
802 view_query: QueryState::new(world),
803 }
804 }
805}
806
807impl FromWorld for LateGpuPreprocessNode {
808 fn from_world(world: &mut World) -> Self {
809 Self {
810 view_query: QueryState::new(world),
811 }
812 }
813}
814
815impl Node for LateGpuPreprocessNode {
816 fn update(&mut self, world: &mut World) {
817 self.view_query.update_archetypes(world);
818 }
819
820 fn run<'w>(
821 &self,
822 _: &mut RenderGraphContext,
823 render_context: &mut RenderContext<'w>,
824 world: &'w World,
825 ) -> Result<(), NodeRunError> {
826 let diagnostics = render_context.diagnostic_recorder();
827
828 let batched_instance_buffers =
830 world.resource::<BatchedInstanceBuffers<MeshUniform, MeshInputUniform>>();
831
832 let pipeline_cache = world.resource::<PipelineCache>();
833 let preprocess_pipelines = world.resource::<PreprocessPipelines>();
834
835 let mut compute_pass =
836 render_context
837 .command_encoder()
838 .begin_compute_pass(&ComputePassDescriptor {
839 label: Some("late_mesh_preprocessing"),
840 timestamp_writes: None,
841 });
842 let pass_span = diagnostics.pass_span(&mut compute_pass, "late_mesh_preprocessing");
843
844 for (view, bind_groups, view_uniform_offset) in self.view_query.iter_manual(world) {
846 let maybe_pipeline_id = preprocess_pipelines
847 .late_gpu_occlusion_culling_preprocess
848 .pipeline_id;
849
850 let Some(preprocess_pipeline_id) = maybe_pipeline_id else {
852 warn!("The build mesh uniforms pipeline wasn't ready");
853 return Ok(());
854 };
855
856 let Some(preprocess_pipeline) =
857 pipeline_cache.get_compute_pipeline(preprocess_pipeline_id)
858 else {
859 return Ok(());
861 };
862
863 compute_pass.set_pipeline(preprocess_pipeline);
864
865 for (phase_type_id, batched_phase_instance_buffers) in
868 &batched_instance_buffers.phase_instance_buffers
869 {
870 let UntypedPhaseBatchedInstanceBuffers {
871 ref work_item_buffers,
872 ref late_indexed_indirect_parameters_buffer,
873 ref late_non_indexed_indirect_parameters_buffer,
874 ..
875 } = *batched_phase_instance_buffers;
876
877 let Some(phase_work_item_buffers) =
879 work_item_buffers.get(&view.retained_view_entity)
880 else {
881 continue;
882 };
883
884 let (
885 PreprocessWorkItemBuffers::Indirect {
886 gpu_occlusion_culling:
887 Some(GpuOcclusionCullingWorkItemBuffers {
888 late_indirect_parameters_indexed_offset,
889 late_indirect_parameters_non_indexed_offset,
890 ..
891 }),
892 ..
893 },
894 Some(PhasePreprocessBindGroups::IndirectOcclusionCulling {
895 late_indexed: maybe_late_indexed_bind_group,
896 late_non_indexed: maybe_late_non_indexed_bind_group,
897 ..
898 }),
899 Some(late_indexed_indirect_parameters_buffer),
900 Some(late_non_indexed_indirect_parameters_buffer),
901 ) = (
902 phase_work_item_buffers,
903 bind_groups.get(phase_type_id),
904 late_indexed_indirect_parameters_buffer.buffer(),
905 late_non_indexed_indirect_parameters_buffer.buffer(),
906 )
907 else {
908 continue;
909 };
910
911 let mut dynamic_offsets: SmallVec<[u32; 1]> = smallvec![];
912 dynamic_offsets.push(view_uniform_offset.offset);
913
914 if let Some(late_indexed_bind_group) = maybe_late_indexed_bind_group {
921 compute_pass.set_push_constants(
922 0,
923 bytemuck::bytes_of(late_indirect_parameters_indexed_offset),
924 );
925
926 compute_pass.set_bind_group(0, late_indexed_bind_group, &dynamic_offsets);
927 compute_pass.dispatch_workgroups_indirect(
928 late_indexed_indirect_parameters_buffer,
929 (*late_indirect_parameters_indexed_offset as u64)
930 * (size_of::<LatePreprocessWorkItemIndirectParameters>() as u64),
931 );
932 }
933
934 if let Some(late_non_indexed_bind_group) = maybe_late_non_indexed_bind_group {
936 compute_pass.set_push_constants(
937 0,
938 bytemuck::bytes_of(late_indirect_parameters_non_indexed_offset),
939 );
940
941 compute_pass.set_bind_group(0, late_non_indexed_bind_group, &dynamic_offsets);
942 compute_pass.dispatch_workgroups_indirect(
943 late_non_indexed_indirect_parameters_buffer,
944 (*late_indirect_parameters_non_indexed_offset as u64)
945 * (size_of::<LatePreprocessWorkItemIndirectParameters>() as u64),
946 );
947 }
948 }
949 }
950
951 pass_span.end(&mut compute_pass);
952
953 Ok(())
954 }
955}
956
957impl Node for EarlyPrepassBuildIndirectParametersNode {
958 fn update(&mut self, world: &mut World) {
959 self.view_query.update_archetypes(world);
960 }
961
962 fn run<'w>(
963 &self,
964 _: &mut RenderGraphContext,
965 render_context: &mut RenderContext<'w>,
966 world: &'w World,
967 ) -> Result<(), NodeRunError> {
968 let preprocess_pipelines = world.resource::<PreprocessPipelines>();
969
970 if self.view_query.iter_manual(world).next().is_none() {
973 return Ok(());
974 }
975
976 run_build_indirect_parameters_node(
977 render_context,
978 world,
979 &preprocess_pipelines.early_phase,
980 "early_prepass_indirect_parameters_building",
981 )
982 }
983}
984
985impl Node for LatePrepassBuildIndirectParametersNode {
986 fn update(&mut self, world: &mut World) {
987 self.view_query.update_archetypes(world);
988 }
989
990 fn run<'w>(
991 &self,
992 _: &mut RenderGraphContext,
993 render_context: &mut RenderContext<'w>,
994 world: &'w World,
995 ) -> Result<(), NodeRunError> {
996 let preprocess_pipelines = world.resource::<PreprocessPipelines>();
997
998 if self.view_query.iter_manual(world).next().is_none() {
1001 return Ok(());
1002 }
1003
1004 run_build_indirect_parameters_node(
1005 render_context,
1006 world,
1007 &preprocess_pipelines.late_phase,
1008 "late_prepass_indirect_parameters_building",
1009 )
1010 }
1011}
1012
1013impl Node for MainBuildIndirectParametersNode {
1014 fn update(&mut self, world: &mut World) {
1015 self.view_query.update_archetypes(world);
1016 }
1017
1018 fn run<'w>(
1019 &self,
1020 _: &mut RenderGraphContext,
1021 render_context: &mut RenderContext<'w>,
1022 world: &'w World,
1023 ) -> Result<(), NodeRunError> {
1024 let preprocess_pipelines = world.resource::<PreprocessPipelines>();
1025
1026 run_build_indirect_parameters_node(
1027 render_context,
1028 world,
1029 &preprocess_pipelines.main_phase,
1030 "main_indirect_parameters_building",
1031 )
1032 }
1033}
1034
/// Records one indirect-parameters building compute pass.
///
/// For each phase with prepared bind groups, this resets the indirect batch
/// set counters and then builds the final indirect draw parameters, once for
/// indexed meshes and once for non-indexed meshes, using the pipelines from
/// `preprocess_phase_pipelines`. `label` names both the compute pass and its
/// diagnostic span.
fn run_build_indirect_parameters_node(
    render_context: &mut RenderContext,
    world: &World,
    preprocess_phase_pipelines: &PreprocessPhasePipelines,
    label: &'static str,
) -> Result<(), NodeRunError> {
    let Some(build_indirect_params_bind_groups) =
        world.get_resource::<BuildIndirectParametersBindGroups>()
    else {
        return Ok(());
    };

    let diagnostics = render_context.diagnostic_recorder();

    let pipeline_cache = world.resource::<PipelineCache>();
    let indirect_parameters_buffers = world.resource::<IndirectParametersBuffers>();

    let mut compute_pass =
        render_context
            .command_encoder()
            .begin_compute_pass(&ComputePassDescriptor {
                label: Some(label),
                timestamp_writes: None,
            });
    let pass_span = diagnostics.pass_span(&mut compute_pass, label);

    // All three pipelines must have been specialized; otherwise close the
    // span and bail out for this frame.
    let (
        Some(reset_indirect_batch_sets_pipeline_id),
        Some(build_indexed_indirect_params_pipeline_id),
        Some(build_non_indexed_indirect_params_pipeline_id),
    ) = (
        preprocess_phase_pipelines
            .reset_indirect_batch_sets
            .pipeline_id,
        preprocess_phase_pipelines
            .gpu_occlusion_culling_build_indexed_indirect_params
            .pipeline_id,
        preprocess_phase_pipelines
            .gpu_occlusion_culling_build_non_indexed_indirect_params
            .pipeline_id,
    )
    else {
        warn!("The build indirect parameters pipelines weren't ready");
        pass_span.end(&mut compute_pass);
        return Ok(());
    };

    // Fetching the compiled pipelines fails while they're still compiling.
    let (
        Some(reset_indirect_batch_sets_pipeline),
        Some(build_indexed_indirect_params_pipeline),
        Some(build_non_indexed_indirect_params_pipeline),
    ) = (
        pipeline_cache.get_compute_pipeline(reset_indirect_batch_sets_pipeline_id),
        pipeline_cache.get_compute_pipeline(build_indexed_indirect_params_pipeline_id),
        pipeline_cache.get_compute_pipeline(build_non_indexed_indirect_params_pipeline_id),
    )
    else {
        pass_span.end(&mut compute_pass);
        return Ok(());
    };

    for (phase_type_id, phase_build_indirect_params_bind_groups) in
        build_indirect_params_bind_groups.iter()
    {
        let Some(phase_indirect_parameters_buffers) =
            indirect_parameters_buffers.get(phase_type_id)
        else {
            continue;
        };

        // Reset batch set counters, then build the indirect parameters for
        // indexed meshes, if this phase has any.
        if let (
            Some(reset_indexed_indirect_batch_sets_bind_group),
            Some(build_indirect_indexed_params_bind_group),
        ) = (
            &phase_build_indirect_params_bind_groups.reset_indexed_indirect_batch_sets,
            &phase_build_indirect_params_bind_groups.build_indexed_indirect,
        ) {
            compute_pass.set_pipeline(reset_indirect_batch_sets_pipeline);
            compute_pass.set_bind_group(0, reset_indexed_indirect_batch_sets_bind_group, &[]);
            let workgroup_count = phase_indirect_parameters_buffers
                .batch_set_count(true)
                .div_ceil(WORKGROUP_SIZE);
            if workgroup_count > 0 {
                compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1);
            }

            compute_pass.set_pipeline(build_indexed_indirect_params_pipeline);
            compute_pass.set_bind_group(0, build_indirect_indexed_params_bind_group, &[]);
            let workgroup_count = phase_indirect_parameters_buffers
                .indexed
                .batch_count()
                .div_ceil(WORKGROUP_SIZE);
            if workgroup_count > 0 {
                compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1);
            }
        }

        // Same as above, for non-indexed meshes.
        if let (
            Some(reset_non_indexed_indirect_batch_sets_bind_group),
            Some(build_indirect_non_indexed_params_bind_group),
        ) = (
            &phase_build_indirect_params_bind_groups.reset_non_indexed_indirect_batch_sets,
            &phase_build_indirect_params_bind_groups.build_non_indexed_indirect,
        ) {
            compute_pass.set_pipeline(reset_indirect_batch_sets_pipeline);
            compute_pass.set_bind_group(0, reset_non_indexed_indirect_batch_sets_bind_group, &[]);
            let workgroup_count = phase_indirect_parameters_buffers
                .batch_set_count(false)
                .div_ceil(WORKGROUP_SIZE);
            if workgroup_count > 0 {
                compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1);
            }

            compute_pass.set_pipeline(build_non_indexed_indirect_params_pipeline);
            compute_pass.set_bind_group(0, build_indirect_non_indexed_params_bind_group, &[]);
            let workgroup_count = phase_indirect_parameters_buffers
                .non_indexed
                .batch_count()
                .div_ceil(WORKGROUP_SIZE);
            if workgroup_count > 0 {
                compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1);
            }
        }
    }

    pass_span.end(&mut compute_pass);

    Ok(())
}
1170
1171impl PreprocessPipelines {
1172 pub(crate) fn pipelines_are_loaded(
1175 &self,
1176 pipeline_cache: &PipelineCache,
1177 preprocessing_support: &GpuPreprocessingSupport,
1178 ) -> bool {
1179 match preprocessing_support.max_supported_mode {
1180 GpuPreprocessingMode::None => false,
1181 GpuPreprocessingMode::PreprocessingOnly => {
1182 self.direct_preprocess.is_loaded(pipeline_cache)
1183 && self
1184 .gpu_frustum_culling_preprocess
1185 .is_loaded(pipeline_cache)
1186 }
1187 GpuPreprocessingMode::Culling => {
1188 self.direct_preprocess.is_loaded(pipeline_cache)
1189 && self
1190 .gpu_frustum_culling_preprocess
1191 .is_loaded(pipeline_cache)
1192 && self
1193 .early_gpu_occlusion_culling_preprocess
1194 .is_loaded(pipeline_cache)
1195 && self
1196 .late_gpu_occlusion_culling_preprocess
1197 .is_loaded(pipeline_cache)
1198 && self
1199 .gpu_frustum_culling_build_indexed_indirect_params
1200 .is_loaded(pipeline_cache)
1201 && self
1202 .gpu_frustum_culling_build_non_indexed_indirect_params
1203 .is_loaded(pipeline_cache)
1204 && self.early_phase.is_loaded(pipeline_cache)
1205 && self.late_phase.is_loaded(pipeline_cache)
1206 && self.main_phase.is_loaded(pipeline_cache)
1207 }
1208 }
1209 }
1210}
1211
1212impl PreprocessPhasePipelines {
1213 fn is_loaded(&self, pipeline_cache: &PipelineCache) -> bool {
1214 self.reset_indirect_batch_sets.is_loaded(pipeline_cache)
1215 && self
1216 .gpu_occlusion_culling_build_indexed_indirect_params
1217 .is_loaded(pipeline_cache)
1218 && self
1219 .gpu_occlusion_culling_build_non_indexed_indirect_params
1220 .is_loaded(pipeline_cache)
1221 }
1222}
1223
1224impl PreprocessPipeline {
1225 fn is_loaded(&self, pipeline_cache: &PipelineCache) -> bool {
1226 self.pipeline_id
1227 .is_some_and(|pipeline_id| pipeline_cache.get_compute_pipeline(pipeline_id).is_some())
1228 }
1229}
1230
1231impl ResetIndirectBatchSetsPipeline {
1232 fn is_loaded(&self, pipeline_cache: &PipelineCache) -> bool {
1233 self.pipeline_id
1234 .is_some_and(|pipeline_id| pipeline_cache.get_compute_pipeline(pipeline_id).is_some())
1235 }
1236}
1237
1238impl BuildIndirectParametersPipeline {
1239 fn is_loaded(&self, pipeline_cache: &PipelineCache) -> bool {
1242 self.pipeline_id
1243 .is_some_and(|pipeline_id| pipeline_cache.get_compute_pipeline(pipeline_id).is_some())
1244 }
1245}
1246
impl SpecializedComputePipeline for PreprocessPipeline {
    type Key = PreprocessPipelineKey;

    fn specialize(&self, key: Self::Key) -> ComputePipelineDescriptor {
        // Every variant writes indirect-parameters metadata; culling modes
        // add further shader defs on top of this base.
        let mut shader_defs = vec!["WRITE_INDIRECT_PARAMETERS_METADATA".into()];
        if key.contains(PreprocessPipelineKey::FRUSTUM_CULLING) {
            shader_defs.push("INDIRECT".into());
            shader_defs.push("FRUSTUM_CULLING".into());
        }
        if key.contains(PreprocessPipelineKey::OCCLUSION_CULLING) {
            shader_defs.push("OCCLUSION_CULLING".into());
            // Early vs. late is mutually exclusive within occlusion culling.
            if key.contains(PreprocessPipelineKey::EARLY_PHASE) {
                shader_defs.push("EARLY_PHASE".into());
            } else {
                shader_defs.push("LATE_PHASE".into());
            }
        }

        ComputePipelineDescriptor {
            label: Some(
                format!(
                    "mesh preprocessing ({})",
                    if key.contains(
                        PreprocessPipelineKey::OCCLUSION_CULLING
                            | PreprocessPipelineKey::EARLY_PHASE
                    ) {
                        "early GPU occlusion culling"
                    } else if key.contains(PreprocessPipelineKey::OCCLUSION_CULLING) {
                        "late GPU occlusion culling"
                    } else if key.contains(PreprocessPipelineKey::FRUSTUM_CULLING) {
                        "GPU frustum culling"
                    } else {
                        "direct"
                    }
                )
                .into(),
            ),
            layout: vec![self.bind_group_layout.clone()],
            // Occlusion-culling variants take a single 4-byte push constant
            // (the late indirect parameters offset pushed by the nodes above).
            push_constant_ranges: if key.contains(PreprocessPipelineKey::OCCLUSION_CULLING) {
                vec![PushConstantRange {
                    stages: ShaderStages::COMPUTE,
                    range: 0..4,
                }]
            } else {
                vec![]
            },
            shader: self.shader.clone(),
            shader_defs,
            ..default()
        }
    }
}
1299
impl FromWorld for PreprocessPipelines {
    /// Builds every bind group layout that mesh preprocessing needs, loads the
    /// embedded compute shaders, and fills out the pipeline records with
    /// `pipeline_id: None`; actual compilation is queued later by
    /// `prepare_preprocess_pipelines`.
    fn from_world(world: &mut World) -> Self {
        let render_device = world.resource::<RenderDevice>();

        // Layout entry sets for each culling mode. The early occlusion-culling
        // variant additionally writes the late-phase work item buffer, bound
        // at index 11; the late variant only reads what the early pass wrote.
        let direct_bind_group_layout_entries = preprocess_direct_bind_group_layout_entries();
        let gpu_frustum_culling_bind_group_layout_entries = gpu_culling_bind_group_layout_entries();
        let gpu_early_occlusion_culling_bind_group_layout_entries =
            gpu_occlusion_culling_bind_group_layout_entries().extend_with_indices(((
                11,
                storage_buffer::<PreprocessWorkItem>(false),
            ),));
        let gpu_late_occlusion_culling_bind_group_layout_entries =
            gpu_occlusion_culling_bind_group_layout_entries();

        // The reset pass only touches the indirect batch set buffer.
        let reset_indirect_batch_sets_bind_group_layout_entries =
            DynamicBindGroupLayoutEntries::sequential(
                ShaderStages::COMPUTE,
                (storage_buffer::<IndirectBatchSet>(false),),
            );

        // Indexed and non-indexed variants share the metadata bindings and
        // differ only in the type of the appended output parameters buffer.
        let build_indexed_indirect_params_bind_group_layout_entries =
            build_indirect_params_bind_group_layout_entries()
                .extend_sequential((storage_buffer::<IndirectParametersIndexed>(false),));
        let build_non_indexed_indirect_params_bind_group_layout_entries =
            build_indirect_params_bind_group_layout_entries()
                .extend_sequential((storage_buffer::<IndirectParametersNonIndexed>(false),));

        let direct_bind_group_layout = render_device.create_bind_group_layout(
            "build mesh uniforms direct bind group layout",
            &direct_bind_group_layout_entries,
        );
        let gpu_frustum_culling_bind_group_layout = render_device.create_bind_group_layout(
            "build mesh uniforms GPU frustum culling bind group layout",
            &gpu_frustum_culling_bind_group_layout_entries,
        );
        let gpu_early_occlusion_culling_bind_group_layout = render_device.create_bind_group_layout(
            "build mesh uniforms GPU early occlusion culling bind group layout",
            &gpu_early_occlusion_culling_bind_group_layout_entries,
        );
        let gpu_late_occlusion_culling_bind_group_layout = render_device.create_bind_group_layout(
            "build mesh uniforms GPU late occlusion culling bind group layout",
            &gpu_late_occlusion_culling_bind_group_layout_entries,
        );
        let reset_indirect_batch_sets_bind_group_layout = render_device.create_bind_group_layout(
            "reset indirect batch sets bind group layout",
            &reset_indirect_batch_sets_bind_group_layout_entries,
        );
        let build_indexed_indirect_params_bind_group_layout = render_device
            .create_bind_group_layout(
                "build indexed indirect parameters bind group layout",
                &build_indexed_indirect_params_bind_group_layout_entries,
            );
        let build_non_indexed_indirect_params_bind_group_layout = render_device
            .create_bind_group_layout(
                "build non-indexed indirect parameters bind group layout",
                &build_non_indexed_indirect_params_bind_group_layout_entries,
            );

        let preprocess_shader = load_embedded_asset!(world, "mesh_preprocess.wgsl");
        let reset_indirect_batch_sets_shader =
            load_embedded_asset!(world, "reset_indirect_batch_sets.wgsl");
        let build_indirect_params_shader =
            load_embedded_asset!(world, "build_indirect_params.wgsl");

        // One copy of this bundle is cloned per phase (early/late/main) below.
        let preprocess_phase_pipelines = PreprocessPhasePipelines {
            reset_indirect_batch_sets: ResetIndirectBatchSetsPipeline {
                bind_group_layout: reset_indirect_batch_sets_bind_group_layout.clone(),
                shader: reset_indirect_batch_sets_shader,
                pipeline_id: None,
            },
            gpu_occlusion_culling_build_indexed_indirect_params: BuildIndirectParametersPipeline {
                bind_group_layout: build_indexed_indirect_params_bind_group_layout.clone(),
                shader: build_indirect_params_shader.clone(),
                pipeline_id: None,
            },
            gpu_occlusion_culling_build_non_indexed_indirect_params:
                BuildIndirectParametersPipeline {
                    bind_group_layout: build_non_indexed_indirect_params_bind_group_layout.clone(),
                    shader: build_indirect_params_shader.clone(),
                    pipeline_id: None,
                },
        };

        PreprocessPipelines {
            direct_preprocess: PreprocessPipeline {
                bind_group_layout: direct_bind_group_layout,
                shader: preprocess_shader.clone(),
                pipeline_id: None,
            },
            gpu_frustum_culling_preprocess: PreprocessPipeline {
                bind_group_layout: gpu_frustum_culling_bind_group_layout,
                shader: preprocess_shader.clone(),
                pipeline_id: None,
            },
            early_gpu_occlusion_culling_preprocess: PreprocessPipeline {
                bind_group_layout: gpu_early_occlusion_culling_bind_group_layout,
                shader: preprocess_shader.clone(),
                pipeline_id: None,
            },
            late_gpu_occlusion_culling_preprocess: PreprocessPipeline {
                bind_group_layout: gpu_late_occlusion_culling_bind_group_layout,
                shader: preprocess_shader,
                pipeline_id: None,
            },
            gpu_frustum_culling_build_indexed_indirect_params: BuildIndirectParametersPipeline {
                bind_group_layout: build_indexed_indirect_params_bind_group_layout.clone(),
                shader: build_indirect_params_shader.clone(),
                pipeline_id: None,
            },
            gpu_frustum_culling_build_non_indexed_indirect_params:
                BuildIndirectParametersPipeline {
                    bind_group_layout: build_non_indexed_indirect_params_bind_group_layout.clone(),
                    shader: build_indirect_params_shader,
                    pipeline_id: None,
                },
            early_phase: preprocess_phase_pipelines.clone(),
            late_phase: preprocess_phase_pipelines.clone(),
            main_phase: preprocess_phase_pipelines.clone(),
        }
    }
}
1426
/// Bind group layout entries for "direct" mesh preprocessing, which transforms
/// meshes without any GPU culling.
///
/// Bindings: 0 = view uniforms (dynamic offset), 3 = current mesh input
/// uniforms, 4 = previous frame's mesh input uniforms, 5 = work items to
/// process, 6 = output mesh uniforms.
fn preprocess_direct_bind_group_layout_entries() -> DynamicBindGroupLayoutEntries {
    DynamicBindGroupLayoutEntries::new_with_indices(
        ShaderStages::COMPUTE,
        (
            (
                0,
                uniform_buffer::<ViewUniform>(true),
            ),
            (3, storage_buffer_read_only::<MeshInputUniform>(false)),
            (4, storage_buffer_read_only::<MeshInputUniform>(false)),
            (5, storage_buffer_read_only::<PreprocessWorkItem>(false)),
            (6, storage_buffer::<MeshUniform>(false)),
        ),
    )
}
1447
/// Layout entries common to every `build_indirect_params.wgsl` variant; the
/// caller appends the indexed or non-indexed output parameters buffer.
///
/// Bindings: 0 = mesh input uniforms, 1 = CPU-side indirect metadata
/// (read-only), 2 = GPU-side indirect metadata (read-only), 3 = indirect
/// batch sets (read-write).
fn build_indirect_params_bind_group_layout_entries() -> DynamicBindGroupLayoutEntries {
    DynamicBindGroupLayoutEntries::new_with_indices(
        ShaderStages::COMPUTE,
        (
            (0, storage_buffer_read_only::<MeshInputUniform>(false)),
            (
                1,
                storage_buffer_read_only::<IndirectParametersCpuMetadata>(false),
            ),
            (
                2,
                storage_buffer_read_only::<IndirectParametersGpuMetadata>(false),
            ),
            (3, storage_buffer::<IndirectBatchSet>(false)),
        ),
    )
}
1467
1468fn gpu_culling_bind_group_layout_entries() -> DynamicBindGroupLayoutEntries {
1471 preprocess_direct_bind_group_layout_entries().extend_with_indices((
1474 (
1476 7,
1477 storage_buffer_read_only::<IndirectParametersCpuMetadata>(
1478 false,
1479 ),
1480 ),
1481 (
1483 8,
1484 storage_buffer::<IndirectParametersGpuMetadata>(false),
1485 ),
1486 (
1488 9,
1489 storage_buffer_read_only::<MeshCullingData>(false),
1490 ),
1491 ))
1492}
1493
1494fn gpu_occlusion_culling_bind_group_layout_entries() -> DynamicBindGroupLayoutEntries {
1495 gpu_culling_bind_group_layout_entries().extend_with_indices((
1496 (
1497 2,
1498 uniform_buffer::<PreviousViewData>(false),
1499 ),
1500 (
1501 10,
1502 texture_2d(TextureSampleType::Float { filterable: true }),
1503 ),
1504 (
1505 12,
1506 storage_buffer::<LatePreprocessWorkItemIndirectParameters>(
1507 false,
1508 ),
1509 ),
1510 ))
1511}
1512
/// A system that specializes and queues compilation of every mesh
/// preprocessing pipeline variant this device can use.
///
/// Direct and frustum-culling pipelines are always prepared. Occlusion-culling
/// pipelines, plus the per-phase reset and build-indirect-parameters
/// pipelines, are prepared only when `GpuPreprocessingSupport` reports that
/// culling is available.
pub fn prepare_preprocess_pipelines(
    pipeline_cache: Res<PipelineCache>,
    render_device: Res<RenderDevice>,
    mut specialized_preprocess_pipelines: ResMut<SpecializedComputePipelines<PreprocessPipeline>>,
    mut specialized_reset_indirect_batch_sets_pipelines: ResMut<
        SpecializedComputePipelines<ResetIndirectBatchSetsPipeline>,
    >,
    mut specialized_build_indirect_parameters_pipelines: ResMut<
        SpecializedComputePipelines<BuildIndirectParametersPipeline>,
    >,
    preprocess_pipelines: ResMut<PreprocessPipelines>,
    gpu_preprocessing_support: Res<GpuPreprocessingSupport>,
) {
    let preprocess_pipelines = preprocess_pipelines.into_inner();

    preprocess_pipelines.direct_preprocess.prepare(
        &pipeline_cache,
        &mut specialized_preprocess_pipelines,
        PreprocessPipelineKey::empty(),
    );
    preprocess_pipelines.gpu_frustum_culling_preprocess.prepare(
        &pipeline_cache,
        &mut specialized_preprocess_pipelines,
        PreprocessPipelineKey::FRUSTUM_CULLING,
    );

    if gpu_preprocessing_support.is_culling_supported() {
        // Occlusion culling implies frustum culling; early and late phases
        // differ only in the `EARLY_PHASE` key bit.
        preprocess_pipelines
            .early_gpu_occlusion_culling_preprocess
            .prepare(
                &pipeline_cache,
                &mut specialized_preprocess_pipelines,
                PreprocessPipelineKey::FRUSTUM_CULLING
                    | PreprocessPipelineKey::OCCLUSION_CULLING
                    | PreprocessPipelineKey::EARLY_PHASE,
            );
        preprocess_pipelines
            .late_gpu_occlusion_culling_preprocess
            .prepare(
                &pipeline_cache,
                &mut specialized_preprocess_pipelines,
                PreprocessPipelineKey::FRUSTUM_CULLING | PreprocessPipelineKey::OCCLUSION_CULLING,
            );
    }

    let mut build_indirect_parameters_pipeline_key = BuildIndirectParametersPipelineKey::empty();

    // Record whether `multi_draw_indirect_count` is available so the shader
    // variant can be compiled to take advantage of it.
    if render_device
        .wgpu_device()
        .features()
        .contains(WgpuFeatures::MULTI_DRAW_INDIRECT_COUNT)
    {
        build_indirect_parameters_pipeline_key
            .insert(BuildIndirectParametersPipelineKey::MULTI_DRAW_INDIRECT_COUNT_SUPPORTED);
    }

    preprocess_pipelines
        .gpu_frustum_culling_build_indexed_indirect_params
        .prepare(
            &pipeline_cache,
            &mut specialized_build_indirect_parameters_pipelines,
            build_indirect_parameters_pipeline_key | BuildIndirectParametersPipelineKey::INDEXED,
        );
    preprocess_pipelines
        .gpu_frustum_culling_build_non_indexed_indirect_params
        .prepare(
            &pipeline_cache,
            &mut specialized_build_indirect_parameters_pipelines,
            build_indirect_parameters_pipeline_key,
        );

    // Everything below is only needed when occlusion culling is supported.
    if !gpu_preprocessing_support.is_culling_supported() {
        return;
    }

    for (preprocess_phase_pipelines, build_indirect_parameters_phase_pipeline_key) in [
        (
            &mut preprocess_pipelines.early_phase,
            BuildIndirectParametersPipelineKey::EARLY_PHASE,
        ),
        (
            &mut preprocess_pipelines.late_phase,
            BuildIndirectParametersPipelineKey::LATE_PHASE,
        ),
        (
            &mut preprocess_pipelines.main_phase,
            BuildIndirectParametersPipelineKey::MAIN_PHASE,
        ),
    ] {
        preprocess_phase_pipelines
            .reset_indirect_batch_sets
            .prepare(
                &pipeline_cache,
                &mut specialized_reset_indirect_batch_sets_pipelines,
            );
        preprocess_phase_pipelines
            .gpu_occlusion_culling_build_indexed_indirect_params
            .prepare(
                &pipeline_cache,
                &mut specialized_build_indirect_parameters_pipelines,
                build_indirect_parameters_pipeline_key
                    | build_indirect_parameters_phase_pipeline_key
                    | BuildIndirectParametersPipelineKey::INDEXED
                    | BuildIndirectParametersPipelineKey::OCCLUSION_CULLING,
            );
        preprocess_phase_pipelines
            .gpu_occlusion_culling_build_non_indexed_indirect_params
            .prepare(
                &pipeline_cache,
                &mut specialized_build_indirect_parameters_pipelines,
                build_indirect_parameters_pipeline_key
                    | build_indirect_parameters_phase_pipeline_key
                    | BuildIndirectParametersPipelineKey::OCCLUSION_CULLING,
            );
    }
}
1632
1633impl PreprocessPipeline {
1634 fn prepare(
1635 &mut self,
1636 pipeline_cache: &PipelineCache,
1637 pipelines: &mut SpecializedComputePipelines<PreprocessPipeline>,
1638 key: PreprocessPipelineKey,
1639 ) {
1640 if self.pipeline_id.is_some() {
1641 return;
1642 }
1643
1644 let preprocess_pipeline_id = pipelines.specialize(pipeline_cache, self, key);
1645 self.pipeline_id = Some(preprocess_pipeline_id);
1646 }
1647}
1648
1649impl SpecializedComputePipeline for ResetIndirectBatchSetsPipeline {
1650 type Key = ();
1651
1652 fn specialize(&self, _: Self::Key) -> ComputePipelineDescriptor {
1653 ComputePipelineDescriptor {
1654 label: Some("reset indirect batch sets".into()),
1655 layout: vec![self.bind_group_layout.clone()],
1656 shader: self.shader.clone(),
1657 ..default()
1658 }
1659 }
1660}
1661
1662impl SpecializedComputePipeline for BuildIndirectParametersPipeline {
1663 type Key = BuildIndirectParametersPipelineKey;
1664
1665 fn specialize(&self, key: Self::Key) -> ComputePipelineDescriptor {
1666 let mut shader_defs = vec![];
1667 if key.contains(BuildIndirectParametersPipelineKey::INDEXED) {
1668 shader_defs.push("INDEXED".into());
1669 }
1670 if key.contains(BuildIndirectParametersPipelineKey::MULTI_DRAW_INDIRECT_COUNT_SUPPORTED) {
1671 shader_defs.push("MULTI_DRAW_INDIRECT_COUNT_SUPPORTED".into());
1672 }
1673 if key.contains(BuildIndirectParametersPipelineKey::OCCLUSION_CULLING) {
1674 shader_defs.push("OCCLUSION_CULLING".into());
1675 }
1676 if key.contains(BuildIndirectParametersPipelineKey::EARLY_PHASE) {
1677 shader_defs.push("EARLY_PHASE".into());
1678 }
1679 if key.contains(BuildIndirectParametersPipelineKey::LATE_PHASE) {
1680 shader_defs.push("LATE_PHASE".into());
1681 }
1682 if key.contains(BuildIndirectParametersPipelineKey::MAIN_PHASE) {
1683 shader_defs.push("MAIN_PHASE".into());
1684 }
1685
1686 let label = format!(
1687 "{} build {}indexed indirect parameters",
1688 if !key.contains(BuildIndirectParametersPipelineKey::OCCLUSION_CULLING) {
1689 "frustum culling"
1690 } else if key.contains(BuildIndirectParametersPipelineKey::EARLY_PHASE) {
1691 "early occlusion culling"
1692 } else if key.contains(BuildIndirectParametersPipelineKey::LATE_PHASE) {
1693 "late occlusion culling"
1694 } else {
1695 "main occlusion culling"
1696 },
1697 if key.contains(BuildIndirectParametersPipelineKey::INDEXED) {
1698 ""
1699 } else {
1700 "non-"
1701 }
1702 );
1703
1704 ComputePipelineDescriptor {
1705 label: Some(label.into()),
1706 layout: vec![self.bind_group_layout.clone()],
1707 shader: self.shader.clone(),
1708 shader_defs,
1709 ..default()
1710 }
1711 }
1712}
1713
1714impl ResetIndirectBatchSetsPipeline {
1715 fn prepare(
1716 &mut self,
1717 pipeline_cache: &PipelineCache,
1718 pipelines: &mut SpecializedComputePipelines<ResetIndirectBatchSetsPipeline>,
1719 ) {
1720 if self.pipeline_id.is_some() {
1721 return;
1722 }
1723
1724 let reset_indirect_batch_sets_pipeline_id = pipelines.specialize(pipeline_cache, self, ());
1725 self.pipeline_id = Some(reset_indirect_batch_sets_pipeline_id);
1726 }
1727}
1728
1729impl BuildIndirectParametersPipeline {
1730 fn prepare(
1731 &mut self,
1732 pipeline_cache: &PipelineCache,
1733 pipelines: &mut SpecializedComputePipelines<BuildIndirectParametersPipeline>,
1734 key: BuildIndirectParametersPipelineKey,
1735 ) {
1736 if self.pipeline_id.is_some() {
1737 return;
1738 }
1739
1740 let build_indirect_parameters_pipeline_id = pipelines.specialize(pipeline_cache, self, key);
1741 self.pipeline_id = Some(build_indirect_parameters_pipeline_id);
1742 }
1743}
1744
/// A system that creates, for every extracted view and every render phase, the
/// bind groups that the mesh preprocessing compute passes use, and attaches
/// them to the view entity as `PreprocessBindGroups`.
///
/// If any phase used indirect drawing, the build-indirect-parameters bind
/// groups are created afterward as well.
#[expect(
    clippy::too_many_arguments,
    reason = "it's a system that needs a lot of arguments"
)]
pub fn prepare_preprocess_bind_groups(
    mut commands: Commands,
    views: Query<(Entity, &ExtractedView)>,
    view_depth_pyramids: Query<(&ViewDepthPyramid, &PreviousViewUniformOffset)>,
    render_device: Res<RenderDevice>,
    batched_instance_buffers: Res<BatchedInstanceBuffers<MeshUniform, MeshInputUniform>>,
    indirect_parameters_buffers: Res<IndirectParametersBuffers>,
    mesh_culling_data_buffer: Res<MeshCullingDataBuffer>,
    view_uniforms: Res<ViewUniforms>,
    previous_view_uniforms: Res<PreviousViewUniforms>,
    pipelines: Res<PreprocessPipelines>,
) {
    let BatchedInstanceBuffers {
        current_input_buffer: current_input_buffer_vec,
        previous_input_buffer: previous_input_buffer_vec,
        phase_instance_buffers,
    } = batched_instance_buffers.into_inner();

    // Without both mesh-input buffers uploaded there is nothing to bind.
    let (Some(current_input_buffer), Some(previous_input_buffer)) = (
        current_input_buffer_vec.buffer().buffer(),
        previous_input_buffer_vec.buffer().buffer(),
    ) else {
        return;
    };

    // Tracks whether any phase of any view used indirect work items.
    let mut any_indirect = false;

    for (view_entity, view) in &views {
        // One bind group per phase type, keyed by the phase's `TypeId`.
        let mut bind_groups = TypeIdMap::default();

        for (phase_type_id, phase_instance_buffers) in phase_instance_buffers {
            let UntypedPhaseBatchedInstanceBuffers {
                data_buffer: ref data_buffer_vec,
                ref work_item_buffers,
                ref late_indexed_indirect_parameters_buffer,
                ref late_non_indexed_indirect_parameters_buffer,
            } = *phase_instance_buffers;

            let Some(data_buffer) = data_buffer_vec.buffer() else {
                continue;
            };

            let Some(phase_indirect_parameters_buffers) =
                indirect_parameters_buffers.get(phase_type_id)
            else {
                continue;
            };

            // Work items are stored per retained view within each phase.
            let Some(work_item_buffers) = work_item_buffers.get(&view.retained_view_entity) else {
                continue;
            };

            // Bundle up everything the `create_*` methods need so we don't
            // pass a dozen arguments to each of them.
            let preprocess_bind_group_builder = PreprocessBindGroupBuilder {
                view: view_entity,
                late_indexed_indirect_parameters_buffer,
                late_non_indexed_indirect_parameters_buffer,
                render_device: &render_device,
                phase_indirect_parameters_buffers,
                mesh_culling_data_buffer: &mesh_culling_data_buffer,
                view_uniforms: &view_uniforms,
                previous_view_uniforms: &previous_view_uniforms,
                pipelines: &pipelines,
                current_input_buffer,
                previous_input_buffer,
                data_buffer,
            };

            // Dispatch on the work item buffer layout: direct, indirect with
            // occlusion culling, or indirect with frustum culling only.
            let (was_indirect, bind_group) = match *work_item_buffers {
                PreprocessWorkItemBuffers::Direct(ref work_item_buffer) => (
                    false,
                    preprocess_bind_group_builder
                        .create_direct_preprocess_bind_groups(work_item_buffer),
                ),

                PreprocessWorkItemBuffers::Indirect {
                    indexed: ref indexed_work_item_buffer,
                    non_indexed: ref non_indexed_work_item_buffer,
                    gpu_occlusion_culling: Some(ref gpu_occlusion_culling_work_item_buffers),
                } => (
                    true,
                    preprocess_bind_group_builder
                        .create_indirect_occlusion_culling_preprocess_bind_groups(
                            &view_depth_pyramids,
                            indexed_work_item_buffer,
                            non_indexed_work_item_buffer,
                            gpu_occlusion_culling_work_item_buffers,
                        ),
                ),

                PreprocessWorkItemBuffers::Indirect {
                    indexed: ref indexed_work_item_buffer,
                    non_indexed: ref non_indexed_work_item_buffer,
                    gpu_occlusion_culling: None,
                } => (
                    true,
                    preprocess_bind_group_builder
                        .create_indirect_frustum_culling_preprocess_bind_groups(
                            indexed_work_item_buffer,
                            non_indexed_work_item_buffer,
                        ),
                ),
            };

            if let Some(bind_group) = bind_group {
                any_indirect = any_indirect || was_indirect;
                bind_groups.insert(*phase_type_id, bind_group);
            }
        }

        commands
            .entity(view_entity)
            .insert(PreprocessBindGroups(bind_groups));
    }

    // The build-indirect-parameters passes are shared across views, so create
    // their bind groups once if anything drew indirectly.
    if any_indirect {
        create_build_indirect_parameters_bind_groups(
            &mut commands,
            &render_device,
            &pipelines,
            current_input_buffer,
            &indirect_parameters_buffers,
        );
    }
}
1888
/// Collects the buffers and resources needed to build the mesh preprocessing
/// bind groups for a single view and phase, so the `create_*` methods don't
/// each need a dozen parameters.
struct PreprocessBindGroupBuilder<'a> {
    /// The entity of the view these bind groups belong to.
    view: Entity,
    /// Indirect dispatch parameters for the late indexed preprocessing pass.
    late_indexed_indirect_parameters_buffer:
        &'a RawBufferVec<LatePreprocessWorkItemIndirectParameters>,
    /// Indirect dispatch parameters for the late non-indexed preprocessing
    /// pass.
    late_non_indexed_indirect_parameters_buffer:
        &'a RawBufferVec<LatePreprocessWorkItemIndirectParameters>,
    /// The GPU device.
    render_device: &'a RenderDevice,
    /// The indexed and non-indexed indirect parameter buffers for this phase.
    phase_indirect_parameters_buffers: &'a UntypedPhaseIndirectParametersBuffers,
    /// Per-mesh culling data, bound at index 9 of the culling layouts.
    mesh_culling_data_buffer: &'a MeshCullingDataBuffer,
    /// The current frame's view uniforms.
    view_uniforms: &'a ViewUniforms,
    /// The previous frame's view uniforms, used by occlusion culling.
    previous_view_uniforms: &'a PreviousViewUniforms,
    /// The preprocessing pipelines, used here for their bind group layouts.
    pipelines: &'a PreprocessPipelines,
    /// This frame's mesh input uniform buffer (binding 3).
    current_input_buffer: &'a Buffer,
    /// The previous frame's mesh input uniform buffer (binding 4).
    previous_input_buffer: &'a Buffer,
    /// The output buffer of transformed mesh uniforms (binding 6).
    data_buffer: &'a Buffer,
}
1927
1928impl<'a> PreprocessBindGroupBuilder<'a> {
    /// Creates the bind group for the "direct" (no GPU culling) mesh
    /// preprocessing pass, or returns `None` if a required buffer is missing.
    fn create_direct_preprocess_bind_groups(
        &self,
        work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
    ) -> Option<PhasePreprocessBindGroups> {
        // Bind only the `len()` items actually written. When the buffer is
        // empty this becomes `None` (NonZero rejects 0), which binds the
        // entire buffer rather than creating an invalid zero-size binding.
        let work_item_buffer_size = NonZero::<u64>::try_from(
            work_item_buffer.len() as u64 * u64::from(PreprocessWorkItem::min_size()),
        )
        .ok();

        Some(PhasePreprocessBindGroups::Direct(
            self.render_device.create_bind_group(
                "preprocess_direct_bind_group",
                &self.pipelines.direct_preprocess.bind_group_layout,
                &BindGroupEntries::with_indices((
                    (0, self.view_uniforms.uniforms.binding()?),
                    (3, self.current_input_buffer.as_entire_binding()),
                    (4, self.previous_input_buffer.as_entire_binding()),
                    (
                        5,
                        BindingResource::Buffer(BufferBinding {
                            buffer: work_item_buffer.buffer()?,
                            offset: 0,
                            size: work_item_buffer_size,
                        }),
                    ),
                    (6, self.data_buffer.as_entire_binding()),
                )),
            ),
        ))
    }
1964
    /// Creates the four bind groups (early/late × indexed/non-indexed) used
    /// when GPU occlusion culling is enabled for this view.
    ///
    /// Returns `None` if the view has no depth pyramid or previous-view
    /// uniform offset; each inner bind group may independently be `None` if
    /// its buffers aren't available.
    fn create_indirect_occlusion_culling_preprocess_bind_groups(
        &self,
        view_depth_pyramids: &Query<(&ViewDepthPyramid, &PreviousViewUniformOffset)>,
        indexed_work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
        non_indexed_work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
        gpu_occlusion_culling_work_item_buffers: &GpuOcclusionCullingWorkItemBuffers,
    ) -> Option<PhasePreprocessBindGroups> {
        let GpuOcclusionCullingWorkItemBuffers {
            late_indexed: ref late_indexed_work_item_buffer,
            late_non_indexed: ref late_non_indexed_work_item_buffer,
            ..
        } = *gpu_occlusion_culling_work_item_buffers;

        // Occlusion culling needs the depth pyramid built for this view.
        let (view_depth_pyramid, previous_view_uniform_offset) =
            view_depth_pyramids.get(self.view).ok()?;

        Some(PhasePreprocessBindGroups::IndirectOcclusionCulling {
            early_indexed: self.create_indirect_occlusion_culling_early_indexed_bind_group(
                view_depth_pyramid,
                previous_view_uniform_offset,
                indexed_work_item_buffer,
                late_indexed_work_item_buffer,
            ),

            early_non_indexed: self.create_indirect_occlusion_culling_early_non_indexed_bind_group(
                view_depth_pyramid,
                previous_view_uniform_offset,
                non_indexed_work_item_buffer,
                late_non_indexed_work_item_buffer,
            ),

            late_indexed: self.create_indirect_occlusion_culling_late_indexed_bind_group(
                view_depth_pyramid,
                previous_view_uniform_offset,
                late_indexed_work_item_buffer,
            ),

            late_non_indexed: self.create_indirect_occlusion_culling_late_non_indexed_bind_group(
                view_depth_pyramid,
                previous_view_uniform_offset,
                late_non_indexed_work_item_buffer,
            ),
        })
    }
2011
    /// Creates the bind group for the *early* occlusion-culling preprocessing
    /// pass over *indexed* meshes, or `None` if any required buffer is absent.
    fn create_indirect_occlusion_culling_early_indexed_bind_group(
        &self,
        view_depth_pyramid: &ViewDepthPyramid,
        previous_view_uniform_offset: &PreviousViewUniformOffset,
        indexed_work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
        late_indexed_work_item_buffer: &UninitBufferVec<PreprocessWorkItem>,
    ) -> Option<BindGroup> {
        let mesh_culling_data_buffer = self.mesh_culling_data_buffer.buffer()?;
        let view_uniforms_binding = self.view_uniforms.uniforms.binding()?;
        let previous_view_buffer = self.previous_view_uniforms.uniforms.buffer()?;

        // All five buffers must exist; otherwise there's nothing to bind.
        match (
            self.phase_indirect_parameters_buffers
                .indexed
                .cpu_metadata_buffer(),
            self.phase_indirect_parameters_buffers
                .indexed
                .gpu_metadata_buffer(),
            indexed_work_item_buffer.buffer(),
            late_indexed_work_item_buffer.buffer(),
            self.late_indexed_indirect_parameters_buffer.buffer(),
        ) {
            (
                Some(indexed_cpu_metadata_buffer),
                Some(indexed_gpu_metadata_buffer),
                Some(indexed_work_item_gpu_buffer),
                Some(late_indexed_work_item_gpu_buffer),
                Some(late_indexed_indirect_parameters_buffer),
            ) => {
                // Bind only the `len()` work items actually written; `None`
                // (zero length) falls back to binding the whole buffer.
                let indexed_work_item_buffer_size = NonZero::<u64>::try_from(
                    indexed_work_item_buffer.len() as u64
                        * u64::from(PreprocessWorkItem::min_size()),
                )
                .ok();

                Some(
                    self.render_device.create_bind_group(
                        "preprocess_early_indexed_gpu_occlusion_culling_bind_group",
                        &self
                            .pipelines
                            .early_gpu_occlusion_culling_preprocess
                            .bind_group_layout,
                        &BindGroupEntries::with_indices((
                            (3, self.current_input_buffer.as_entire_binding()),
                            (4, self.previous_input_buffer.as_entire_binding()),
                            (
                                5,
                                BindingResource::Buffer(BufferBinding {
                                    buffer: indexed_work_item_gpu_buffer,
                                    offset: 0,
                                    size: indexed_work_item_buffer_size,
                                }),
                            ),
                            (6, self.data_buffer.as_entire_binding()),
                            (7, indexed_cpu_metadata_buffer.as_entire_binding()),
                            (8, indexed_gpu_metadata_buffer.as_entire_binding()),
                            (9, mesh_culling_data_buffer.as_entire_binding()),
                            (0, view_uniforms_binding.clone()),
                            (10, &view_depth_pyramid.all_mips),
                            (
                                2,
                                BufferBinding {
                                    buffer: previous_view_buffer,
                                    offset: previous_view_uniform_offset.offset as u64,
                                    size: NonZeroU64::new(size_of::<PreviousViewData>() as u64),
                                },
                            ),
                            (
                                11,
                                // NOTE(review): the late work item buffer is
                                // bound with the *early* buffer's size; this
                                // assumes its capacity is at least
                                // `indexed_work_item_buffer.len()` — confirm
                                // against the allocation site.
                                BufferBinding {
                                    buffer: late_indexed_work_item_gpu_buffer,
                                    offset: 0,
                                    size: indexed_work_item_buffer_size,
                                },
                            ),
                            (
                                12,
                                BufferBinding {
                                    buffer: late_indexed_indirect_parameters_buffer,
                                    offset: 0,
                                    size: NonZeroU64::new(
                                        late_indexed_indirect_parameters_buffer.size(),
                                    ),
                                },
                            ),
                        )),
                    ),
                )
            }
            _ => None,
        }
    }
2109
    /// Creates the bind group for the *early* occlusion-culling preprocessing
    /// pass over *non-indexed* meshes, or `None` if any required buffer is
    /// absent.
    fn create_indirect_occlusion_culling_early_non_indexed_bind_group(
        &self,
        view_depth_pyramid: &ViewDepthPyramid,
        previous_view_uniform_offset: &PreviousViewUniformOffset,
        non_indexed_work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
        late_non_indexed_work_item_buffer: &UninitBufferVec<PreprocessWorkItem>,
    ) -> Option<BindGroup> {
        let mesh_culling_data_buffer = self.mesh_culling_data_buffer.buffer()?;
        let view_uniforms_binding = self.view_uniforms.uniforms.binding()?;
        let previous_view_buffer = self.previous_view_uniforms.uniforms.buffer()?;

        // All five buffers must exist; otherwise there's nothing to bind.
        // Note that the `Some` pattern below shadows the
        // `late_non_indexed_work_item_buffer` parameter with the raw `Buffer`.
        match (
            self.phase_indirect_parameters_buffers
                .non_indexed
                .cpu_metadata_buffer(),
            self.phase_indirect_parameters_buffers
                .non_indexed
                .gpu_metadata_buffer(),
            non_indexed_work_item_buffer.buffer(),
            late_non_indexed_work_item_buffer.buffer(),
            self.late_non_indexed_indirect_parameters_buffer.buffer(),
        ) {
            (
                Some(non_indexed_cpu_metadata_buffer),
                Some(non_indexed_gpu_metadata_buffer),
                Some(non_indexed_work_item_gpu_buffer),
                Some(late_non_indexed_work_item_buffer),
                Some(late_non_indexed_indirect_parameters_buffer),
            ) => {
                // Bind only the `len()` work items actually written; `None`
                // (zero length) falls back to binding the whole buffer.
                let non_indexed_work_item_buffer_size = NonZero::<u64>::try_from(
                    non_indexed_work_item_buffer.len() as u64
                        * u64::from(PreprocessWorkItem::min_size()),
                )
                .ok();

                Some(
                    self.render_device.create_bind_group(
                        "preprocess_early_non_indexed_gpu_occlusion_culling_bind_group",
                        &self
                            .pipelines
                            .early_gpu_occlusion_culling_preprocess
                            .bind_group_layout,
                        &BindGroupEntries::with_indices((
                            (3, self.current_input_buffer.as_entire_binding()),
                            (4, self.previous_input_buffer.as_entire_binding()),
                            (
                                5,
                                BindingResource::Buffer(BufferBinding {
                                    buffer: non_indexed_work_item_gpu_buffer,
                                    offset: 0,
                                    size: non_indexed_work_item_buffer_size,
                                }),
                            ),
                            (6, self.data_buffer.as_entire_binding()),
                            (7, non_indexed_cpu_metadata_buffer.as_entire_binding()),
                            (8, non_indexed_gpu_metadata_buffer.as_entire_binding()),
                            (9, mesh_culling_data_buffer.as_entire_binding()),
                            (0, view_uniforms_binding.clone()),
                            (10, &view_depth_pyramid.all_mips),
                            (
                                2,
                                BufferBinding {
                                    buffer: previous_view_buffer,
                                    offset: previous_view_uniform_offset.offset as u64,
                                    size: NonZeroU64::new(size_of::<PreviousViewData>() as u64),
                                },
                            ),
                            (
                                11,
                                // NOTE(review): bound with the *early*
                                // buffer's size; assumes the late buffer's
                                // capacity is at least that — confirm.
                                BufferBinding {
                                    buffer: late_non_indexed_work_item_buffer,
                                    offset: 0,
                                    size: non_indexed_work_item_buffer_size,
                                },
                            ),
                            (
                                12,
                                BufferBinding {
                                    buffer: late_non_indexed_indirect_parameters_buffer,
                                    offset: 0,
                                    size: NonZeroU64::new(
                                        late_non_indexed_indirect_parameters_buffer.size(),
                                    ),
                                },
                            ),
                        )),
                    ),
                )
            }
            _ => None,
        }
    }
2207
    /// Creates the bind group for the *late* occlusion-culling preprocessing
    /// pass over *indexed* meshes, or `None` if any required buffer is absent.
    ///
    /// Unlike the early pass, this one has no binding 11: it consumes the late
    /// work item buffer (at binding 5) instead of producing one.
    fn create_indirect_occlusion_culling_late_indexed_bind_group(
        &self,
        view_depth_pyramid: &ViewDepthPyramid,
        previous_view_uniform_offset: &PreviousViewUniformOffset,
        late_indexed_work_item_buffer: &UninitBufferVec<PreprocessWorkItem>,
    ) -> Option<BindGroup> {
        let mesh_culling_data_buffer = self.mesh_culling_data_buffer.buffer()?;
        let view_uniforms_binding = self.view_uniforms.uniforms.binding()?;
        let previous_view_buffer = self.previous_view_uniforms.uniforms.buffer()?;

        // All four buffers must exist; otherwise there's nothing to bind.
        match (
            self.phase_indirect_parameters_buffers
                .indexed
                .cpu_metadata_buffer(),
            self.phase_indirect_parameters_buffers
                .indexed
                .gpu_metadata_buffer(),
            late_indexed_work_item_buffer.buffer(),
            self.late_indexed_indirect_parameters_buffer.buffer(),
        ) {
            (
                Some(indexed_cpu_metadata_buffer),
                Some(indexed_gpu_metadata_buffer),
                Some(late_indexed_work_item_gpu_buffer),
                Some(late_indexed_indirect_parameters_buffer),
            ) => {
                // Bind only `len()` items; `None` (zero length) falls back to
                // binding the whole buffer.
                let late_indexed_work_item_buffer_size = NonZero::<u64>::try_from(
                    late_indexed_work_item_buffer.len() as u64
                        * u64::from(PreprocessWorkItem::min_size()),
                )
                .ok();

                Some(
                    self.render_device.create_bind_group(
                        "preprocess_late_indexed_gpu_occlusion_culling_bind_group",
                        &self
                            .pipelines
                            .late_gpu_occlusion_culling_preprocess
                            .bind_group_layout,
                        &BindGroupEntries::with_indices((
                            (3, self.current_input_buffer.as_entire_binding()),
                            (4, self.previous_input_buffer.as_entire_binding()),
                            (
                                5,
                                BindingResource::Buffer(BufferBinding {
                                    buffer: late_indexed_work_item_gpu_buffer,
                                    offset: 0,
                                    size: late_indexed_work_item_buffer_size,
                                }),
                            ),
                            (6, self.data_buffer.as_entire_binding()),
                            (7, indexed_cpu_metadata_buffer.as_entire_binding()),
                            (8, indexed_gpu_metadata_buffer.as_entire_binding()),
                            (9, mesh_culling_data_buffer.as_entire_binding()),
                            (0, view_uniforms_binding.clone()),
                            (10, &view_depth_pyramid.all_mips),
                            (
                                2,
                                BufferBinding {
                                    buffer: previous_view_buffer,
                                    offset: previous_view_uniform_offset.offset as u64,
                                    size: NonZeroU64::new(size_of::<PreviousViewData>() as u64),
                                },
                            ),
                            (
                                12,
                                BufferBinding {
                                    buffer: late_indexed_indirect_parameters_buffer,
                                    offset: 0,
                                    size: NonZeroU64::new(
                                        late_indexed_indirect_parameters_buffer.size(),
                                    ),
                                },
                            ),
                        )),
                    ),
                )
            }
            _ => None,
        }
    }
2294
    /// Creates the bind group for the *late* occlusion-culling preprocessing
    /// pass over *non-indexed* meshes, or `None` if any required buffer is
    /// absent.
    ///
    /// As with the indexed late pass, there is no binding 11 here: this pass
    /// consumes the late work item buffer (at binding 5).
    fn create_indirect_occlusion_culling_late_non_indexed_bind_group(
        &self,
        view_depth_pyramid: &ViewDepthPyramid,
        previous_view_uniform_offset: &PreviousViewUniformOffset,
        late_non_indexed_work_item_buffer: &UninitBufferVec<PreprocessWorkItem>,
    ) -> Option<BindGroup> {
        let mesh_culling_data_buffer = self.mesh_culling_data_buffer.buffer()?;
        let view_uniforms_binding = self.view_uniforms.uniforms.binding()?;
        let previous_view_buffer = self.previous_view_uniforms.uniforms.buffer()?;

        // All four buffers must exist; otherwise there's nothing to bind.
        match (
            self.phase_indirect_parameters_buffers
                .non_indexed
                .cpu_metadata_buffer(),
            self.phase_indirect_parameters_buffers
                .non_indexed
                .gpu_metadata_buffer(),
            late_non_indexed_work_item_buffer.buffer(),
            self.late_non_indexed_indirect_parameters_buffer.buffer(),
        ) {
            (
                Some(non_indexed_cpu_metadata_buffer),
                Some(non_indexed_gpu_metadata_buffer),
                Some(non_indexed_work_item_gpu_buffer),
                Some(late_non_indexed_indirect_parameters_buffer),
            ) => {
                // Bind only `len()` items; `None` (zero length) falls back to
                // binding the whole buffer.
                let non_indexed_work_item_buffer_size = NonZero::<u64>::try_from(
                    late_non_indexed_work_item_buffer.len() as u64
                        * u64::from(PreprocessWorkItem::min_size()),
                )
                .ok();

                Some(
                    self.render_device.create_bind_group(
                        "preprocess_late_non_indexed_gpu_occlusion_culling_bind_group",
                        &self
                            .pipelines
                            .late_gpu_occlusion_culling_preprocess
                            .bind_group_layout,
                        &BindGroupEntries::with_indices((
                            (3, self.current_input_buffer.as_entire_binding()),
                            (4, self.previous_input_buffer.as_entire_binding()),
                            (
                                5,
                                BindingResource::Buffer(BufferBinding {
                                    buffer: non_indexed_work_item_gpu_buffer,
                                    offset: 0,
                                    size: non_indexed_work_item_buffer_size,
                                }),
                            ),
                            (6, self.data_buffer.as_entire_binding()),
                            (7, non_indexed_cpu_metadata_buffer.as_entire_binding()),
                            (8, non_indexed_gpu_metadata_buffer.as_entire_binding()),
                            (9, mesh_culling_data_buffer.as_entire_binding()),
                            (0, view_uniforms_binding.clone()),
                            (10, &view_depth_pyramid.all_mips),
                            (
                                2,
                                BufferBinding {
                                    buffer: previous_view_buffer,
                                    offset: previous_view_uniform_offset.offset as u64,
                                    size: NonZeroU64::new(size_of::<PreviousViewData>() as u64),
                                },
                            ),
                            (
                                12,
                                BufferBinding {
                                    buffer: late_non_indexed_indirect_parameters_buffer,
                                    offset: 0,
                                    size: NonZeroU64::new(
                                        late_non_indexed_indirect_parameters_buffer.size(),
                                    ),
                                },
                            ),
                        )),
                    ),
                )
            }
            _ => None,
        }
    }
2381
2382 fn create_indirect_frustum_culling_preprocess_bind_groups(
2385 &self,
2386 indexed_work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
2387 non_indexed_work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
2388 ) -> Option<PhasePreprocessBindGroups> {
2389 Some(PhasePreprocessBindGroups::IndirectFrustumCulling {
2390 indexed: self
2391 .create_indirect_frustum_culling_indexed_bind_group(indexed_work_item_buffer),
2392 non_indexed: self.create_indirect_frustum_culling_non_indexed_bind_group(
2393 non_indexed_work_item_buffer,
2394 ),
2395 })
2396 }
2397
2398 fn create_indirect_frustum_culling_indexed_bind_group(
2401 &self,
2402 indexed_work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
2403 ) -> Option<BindGroup> {
2404 let mesh_culling_data_buffer = self.mesh_culling_data_buffer.buffer()?;
2405 let view_uniforms_binding = self.view_uniforms.uniforms.binding()?;
2406
2407 match (
2408 self.phase_indirect_parameters_buffers
2409 .indexed
2410 .cpu_metadata_buffer(),
2411 self.phase_indirect_parameters_buffers
2412 .indexed
2413 .gpu_metadata_buffer(),
2414 indexed_work_item_buffer.buffer(),
2415 ) {
2416 (
2417 Some(indexed_cpu_metadata_buffer),
2418 Some(indexed_gpu_metadata_buffer),
2419 Some(indexed_work_item_gpu_buffer),
2420 ) => {
2421 let indexed_work_item_buffer_size = NonZero::<u64>::try_from(
2425 indexed_work_item_buffer.len() as u64
2426 * u64::from(PreprocessWorkItem::min_size()),
2427 )
2428 .ok();
2429
2430 Some(
2431 self.render_device.create_bind_group(
2432 "preprocess_gpu_indexed_frustum_culling_bind_group",
2433 &self
2434 .pipelines
2435 .gpu_frustum_culling_preprocess
2436 .bind_group_layout,
2437 &BindGroupEntries::with_indices((
2438 (3, self.current_input_buffer.as_entire_binding()),
2439 (4, self.previous_input_buffer.as_entire_binding()),
2440 (
2441 5,
2442 BindingResource::Buffer(BufferBinding {
2443 buffer: indexed_work_item_gpu_buffer,
2444 offset: 0,
2445 size: indexed_work_item_buffer_size,
2446 }),
2447 ),
2448 (6, self.data_buffer.as_entire_binding()),
2449 (7, indexed_cpu_metadata_buffer.as_entire_binding()),
2450 (8, indexed_gpu_metadata_buffer.as_entire_binding()),
2451 (9, mesh_culling_data_buffer.as_entire_binding()),
2452 (0, view_uniforms_binding.clone()),
2453 )),
2454 ),
2455 )
2456 }
2457 _ => None,
2458 }
2459 }
2460
2461 fn create_indirect_frustum_culling_non_indexed_bind_group(
2464 &self,
2465 non_indexed_work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
2466 ) -> Option<BindGroup> {
2467 let mesh_culling_data_buffer = self.mesh_culling_data_buffer.buffer()?;
2468 let view_uniforms_binding = self.view_uniforms.uniforms.binding()?;
2469
2470 match (
2471 self.phase_indirect_parameters_buffers
2472 .non_indexed
2473 .cpu_metadata_buffer(),
2474 self.phase_indirect_parameters_buffers
2475 .non_indexed
2476 .gpu_metadata_buffer(),
2477 non_indexed_work_item_buffer.buffer(),
2478 ) {
2479 (
2480 Some(non_indexed_cpu_metadata_buffer),
2481 Some(non_indexed_gpu_metadata_buffer),
2482 Some(non_indexed_work_item_gpu_buffer),
2483 ) => {
2484 let non_indexed_work_item_buffer_size = NonZero::<u64>::try_from(
2488 non_indexed_work_item_buffer.len() as u64
2489 * u64::from(PreprocessWorkItem::min_size()),
2490 )
2491 .ok();
2492
2493 Some(
2494 self.render_device.create_bind_group(
2495 "preprocess_gpu_non_indexed_frustum_culling_bind_group",
2496 &self
2497 .pipelines
2498 .gpu_frustum_culling_preprocess
2499 .bind_group_layout,
2500 &BindGroupEntries::with_indices((
2501 (3, self.current_input_buffer.as_entire_binding()),
2502 (4, self.previous_input_buffer.as_entire_binding()),
2503 (
2504 5,
2505 BindingResource::Buffer(BufferBinding {
2506 buffer: non_indexed_work_item_gpu_buffer,
2507 offset: 0,
2508 size: non_indexed_work_item_buffer_size,
2509 }),
2510 ),
2511 (6, self.data_buffer.as_entire_binding()),
2512 (7, non_indexed_cpu_metadata_buffer.as_entire_binding()),
2513 (8, non_indexed_gpu_metadata_buffer.as_entire_binding()),
2514 (9, mesh_culling_data_buffer.as_entire_binding()),
2515 (0, view_uniforms_binding.clone()),
2516 )),
2517 ),
2518 )
2519 }
2520 _ => None,
2521 }
2522 }
2523}
2524
2525fn create_build_indirect_parameters_bind_groups(
2529 commands: &mut Commands,
2530 render_device: &RenderDevice,
2531 pipelines: &PreprocessPipelines,
2532 current_input_buffer: &Buffer,
2533 indirect_parameters_buffers: &IndirectParametersBuffers,
2534) {
2535 let mut build_indirect_parameters_bind_groups = BuildIndirectParametersBindGroups::new();
2536
2537 for (phase_type_id, phase_indirect_parameters_buffer) in indirect_parameters_buffers.iter() {
2538 build_indirect_parameters_bind_groups.insert(
2539 *phase_type_id,
2540 PhaseBuildIndirectParametersBindGroups {
2541 reset_indexed_indirect_batch_sets: match (phase_indirect_parameters_buffer
2542 .indexed
2543 .batch_sets_buffer(),)
2544 {
2545 (Some(indexed_batch_sets_buffer),) => Some(
2546 render_device.create_bind_group(
2547 "reset_indexed_indirect_batch_sets_bind_group",
2548 &pipelines
2551 .early_phase
2552 .reset_indirect_batch_sets
2553 .bind_group_layout,
2554 &BindGroupEntries::sequential((
2555 indexed_batch_sets_buffer.as_entire_binding(),
2556 )),
2557 ),
2558 ),
2559 _ => None,
2560 },
2561
2562 reset_non_indexed_indirect_batch_sets: match (phase_indirect_parameters_buffer
2563 .non_indexed
2564 .batch_sets_buffer(),)
2565 {
2566 (Some(non_indexed_batch_sets_buffer),) => Some(
2567 render_device.create_bind_group(
2568 "reset_non_indexed_indirect_batch_sets_bind_group",
2569 &pipelines
2572 .early_phase
2573 .reset_indirect_batch_sets
2574 .bind_group_layout,
2575 &BindGroupEntries::sequential((
2576 non_indexed_batch_sets_buffer.as_entire_binding(),
2577 )),
2578 ),
2579 ),
2580 _ => None,
2581 },
2582
2583 build_indexed_indirect: match (
2584 phase_indirect_parameters_buffer
2585 .indexed
2586 .cpu_metadata_buffer(),
2587 phase_indirect_parameters_buffer
2588 .indexed
2589 .gpu_metadata_buffer(),
2590 phase_indirect_parameters_buffer.indexed.data_buffer(),
2591 phase_indirect_parameters_buffer.indexed.batch_sets_buffer(),
2592 ) {
2593 (
2594 Some(indexed_indirect_parameters_cpu_metadata_buffer),
2595 Some(indexed_indirect_parameters_gpu_metadata_buffer),
2596 Some(indexed_indirect_parameters_data_buffer),
2597 Some(indexed_batch_sets_buffer),
2598 ) => Some(
2599 render_device.create_bind_group(
2600 "build_indexed_indirect_parameters_bind_group",
2601 &pipelines
2604 .gpu_frustum_culling_build_indexed_indirect_params
2605 .bind_group_layout,
2606 &BindGroupEntries::sequential((
2607 current_input_buffer.as_entire_binding(),
2608 BufferBinding {
2611 buffer: indexed_indirect_parameters_cpu_metadata_buffer,
2612 offset: 0,
2613 size: NonZeroU64::new(
2614 phase_indirect_parameters_buffer.indexed.batch_count()
2615 as u64
2616 * size_of::<IndirectParametersCpuMetadata>() as u64,
2617 ),
2618 },
2619 BufferBinding {
2620 buffer: indexed_indirect_parameters_gpu_metadata_buffer,
2621 offset: 0,
2622 size: NonZeroU64::new(
2623 phase_indirect_parameters_buffer.indexed.batch_count()
2624 as u64
2625 * size_of::<IndirectParametersGpuMetadata>() as u64,
2626 ),
2627 },
2628 indexed_batch_sets_buffer.as_entire_binding(),
2629 indexed_indirect_parameters_data_buffer.as_entire_binding(),
2630 )),
2631 ),
2632 ),
2633 _ => None,
2634 },
2635
2636 build_non_indexed_indirect: match (
2637 phase_indirect_parameters_buffer
2638 .non_indexed
2639 .cpu_metadata_buffer(),
2640 phase_indirect_parameters_buffer
2641 .non_indexed
2642 .gpu_metadata_buffer(),
2643 phase_indirect_parameters_buffer.non_indexed.data_buffer(),
2644 phase_indirect_parameters_buffer
2645 .non_indexed
2646 .batch_sets_buffer(),
2647 ) {
2648 (
2649 Some(non_indexed_indirect_parameters_cpu_metadata_buffer),
2650 Some(non_indexed_indirect_parameters_gpu_metadata_buffer),
2651 Some(non_indexed_indirect_parameters_data_buffer),
2652 Some(non_indexed_batch_sets_buffer),
2653 ) => Some(
2654 render_device.create_bind_group(
2655 "build_non_indexed_indirect_parameters_bind_group",
2656 &pipelines
2659 .gpu_frustum_culling_build_non_indexed_indirect_params
2660 .bind_group_layout,
2661 &BindGroupEntries::sequential((
2662 current_input_buffer.as_entire_binding(),
2663 BufferBinding {
2666 buffer: non_indexed_indirect_parameters_cpu_metadata_buffer,
2667 offset: 0,
2668 size: NonZeroU64::new(
2669 phase_indirect_parameters_buffer.non_indexed.batch_count()
2670 as u64
2671 * size_of::<IndirectParametersCpuMetadata>() as u64,
2672 ),
2673 },
2674 BufferBinding {
2675 buffer: non_indexed_indirect_parameters_gpu_metadata_buffer,
2676 offset: 0,
2677 size: NonZeroU64::new(
2678 phase_indirect_parameters_buffer.non_indexed.batch_count()
2679 as u64
2680 * size_of::<IndirectParametersGpuMetadata>() as u64,
2681 ),
2682 },
2683 non_indexed_batch_sets_buffer.as_entire_binding(),
2684 non_indexed_indirect_parameters_data_buffer.as_entire_binding(),
2685 )),
2686 ),
2687 ),
2688 _ => None,
2689 },
2690 },
2691 );
2692 }
2693
2694 commands.insert_resource(build_indirect_parameters_bind_groups);
2695}
2696
2697pub fn write_mesh_culling_data_buffer(
2699 render_device: Res<RenderDevice>,
2700 render_queue: Res<RenderQueue>,
2701 mut mesh_culling_data_buffer: ResMut<MeshCullingDataBuffer>,
2702) {
2703 mesh_culling_data_buffer.write_buffer(&render_device, &render_queue);
2704}