1use core::num::{NonZero, NonZeroU64};
10
11use bevy_app::{App, Plugin};
12use bevy_asset::{embedded_asset, load_embedded_asset, Handle};
13use bevy_core_pipeline::{
14 core_3d::graph::{Core3d, Node3d},
15 experimental::mip_generation::ViewDepthPyramid,
16 prepass::{DepthPrepass, PreviousViewData, PreviousViewUniformOffset, PreviousViewUniforms},
17};
18use bevy_derive::{Deref, DerefMut};
19use bevy_ecs::{
20 component::Component,
21 entity::Entity,
22 prelude::resource_exists,
23 query::{Has, Or, QueryState, With, Without},
24 resource::Resource,
25 schedule::IntoScheduleConfigs as _,
26 system::{lifetimeless::Read, Commands, Query, Res, ResMut},
27 world::{FromWorld, World},
28};
29use bevy_render::{
30 batching::gpu_preprocessing::{
31 BatchedInstanceBuffers, GpuOcclusionCullingWorkItemBuffers, GpuPreprocessingMode,
32 GpuPreprocessingSupport, IndirectBatchSet, IndirectParametersBuffers,
33 IndirectParametersCpuMetadata, IndirectParametersGpuMetadata, IndirectParametersIndexed,
34 IndirectParametersNonIndexed, LatePreprocessWorkItemIndirectParameters, PreprocessWorkItem,
35 PreprocessWorkItemBuffers, UntypedPhaseBatchedInstanceBuffers,
36 UntypedPhaseIndirectParametersBuffers,
37 },
38 diagnostic::RecordDiagnostics,
39 experimental::occlusion_culling::OcclusionCulling,
40 render_graph::{Node, NodeRunError, RenderGraphContext, RenderGraphExt},
41 render_resource::{
42 binding_types::{storage_buffer, storage_buffer_read_only, texture_2d, uniform_buffer},
43 BindGroup, BindGroupEntries, BindGroupLayoutDescriptor, BindingResource, Buffer,
44 BufferBinding, CachedComputePipelineId, ComputePassDescriptor, ComputePipelineDescriptor,
45 DynamicBindGroupLayoutEntries, PipelineCache, PushConstantRange, RawBufferVec,
46 ShaderStages, ShaderType, SpecializedComputePipeline, SpecializedComputePipelines,
47 TextureSampleType, UninitBufferVec,
48 },
49 renderer::{RenderContext, RenderDevice, RenderQueue},
50 settings::WgpuFeatures,
51 view::{ExtractedView, NoIndirectDrawing, ViewUniform, ViewUniformOffset, ViewUniforms},
52 Render, RenderApp, RenderSystems,
53};
54use bevy_shader::Shader;
55use bevy_utils::{default, TypeIdMap};
56use bitflags::bitflags;
57use smallvec::{smallvec, SmallVec};
58use tracing::warn;
59
60use crate::{
61 graph::NodePbr, MeshCullingData, MeshCullingDataBuffer, MeshInputUniform, MeshUniform,
62};
63
64use super::{ShadowView, ViewLightEntities};
65
/// Number of work items handled per compute workgroup when computing dispatch
/// counts below (presumably matches the `@workgroup_size` declared in the
/// preprocessing shaders — confirm against `mesh_preprocess.wgsl`).
const WORKGROUP_SIZE: usize = 64;
68
/// A plugin that registers the GPU mesh preprocessing shaders, systems, and
/// render-graph nodes (see [`Plugin::finish`] below for the full wiring).
pub struct GpuMeshPreprocessPlugin {
    /// Whether to build instance buffers on GPU. When false — or when the
    /// platform reports no preprocessing support — `finish` registers nothing.
    pub use_gpu_instance_buffer_builder: bool,
}
80
/// The render-graph node that zeroes out the GPU metadata regions of the
/// indirect parameters buffers at the start of the frame.
#[derive(Default)]
pub struct ClearIndirectParametersMetadataNode;
86
/// The render-graph node for the early mesh preprocessing compute pass.
pub struct EarlyGpuPreprocessNode {
    /// All views eligible for preprocessing. The bind groups and view uniform
    /// offset are optional because a view's resources may not have been
    /// prepared yet; such views are skipped at run time.
    view_query: QueryState<
        (
            Read<ExtractedView>,
            Option<Read<PreprocessBindGroups>>,
            Option<Read<ViewUniformOffset>>,
            Has<NoIndirectDrawing>,
            Has<OcclusionCulling>,
        ),
        Without<SkipGpuPreprocess>,
    >,
    /// Looks up the light (shadow) views attached to the main view so they
    /// can be preprocessed in the same pass.
    main_view_query: QueryState<Read<ViewLightEntities>>,
}
107
/// The render-graph node for the late mesh preprocessing compute pass.
///
/// Unlike the early node, this only matches views that use indirect drawing
/// with occlusion culling and have a depth prepass.
pub struct LateGpuPreprocessNode {
    // Here the bind groups and uniform offset are required, not optional: a
    // view with no prepared resources simply doesn't match the query.
    view_query: QueryState<
        (
            Read<ExtractedView>,
            Read<PreprocessBindGroups>,
            Read<ViewUniformOffset>,
        ),
        (
            Without<SkipGpuPreprocess>,
            Without<NoIndirectDrawing>,
            With<OcclusionCulling>,
            With<DepthPrepass>,
        ),
    >,
}
130
/// The render-graph node that builds indirect draw parameters before the
/// early prepass.
pub struct EarlyPrepassBuildIndirectParametersNode {
    // Used only as an "any matching view exists?" guard in `run`; matches
    // indirect-drawing views that have either a depth prepass or a shadow view.
    view_query: QueryState<
        Read<PreprocessBindGroups>,
        (
            Without<SkipGpuPreprocess>,
            Without<NoIndirectDrawing>,
            Or<(With<DepthPrepass>, With<ShadowView>)>,
        ),
    >,
}
148
/// The render-graph node that builds indirect draw parameters before the
/// late prepass.
pub struct LatePrepassBuildIndirectParametersNode {
    // Same guard query as the early variant, but additionally restricted to
    // views with occlusion culling enabled.
    view_query: QueryState<
        Read<PreprocessBindGroups>,
        (
            Without<SkipGpuPreprocess>,
            Without<NoIndirectDrawing>,
            Or<(With<DepthPrepass>, With<ShadowView>)>,
            With<OcclusionCulling>,
        ),
    >,
}
168
/// The render-graph node that builds indirect draw parameters before the
/// main pass.
pub struct MainBuildIndirectParametersNode {
    // Kept up to date in `Node::update`; `run` doesn't gate on it (the main
    // phase always executes the build step).
    view_query: QueryState<
        Read<PreprocessBindGroups>,
        (Without<SkipGpuPreprocess>, Without<NoIndirectDrawing>),
    >,
}
183
/// All compute pipelines used for GPU mesh preprocessing and indirect
/// parameter building, one per culling mode and phase.
#[derive(Resource)]
pub struct PreprocessPipelines {
    /// Pipeline used when indirect drawing is disabled (`NoIndirectDrawing`).
    pub direct_preprocess: PreprocessPipeline,
    /// Pipeline for indirect drawing with GPU frustum culling only.
    pub gpu_frustum_culling_preprocess: PreprocessPipeline,
    /// Early-phase pipeline for GPU occlusion culling.
    pub early_gpu_occlusion_culling_preprocess: PreprocessPipeline,
    /// Late-phase pipeline for GPU occlusion culling.
    pub late_gpu_occlusion_culling_preprocess: PreprocessPipeline,
    /// Builds indirect parameters for indexed meshes (frustum culling mode).
    pub gpu_frustum_culling_build_indexed_indirect_params: BuildIndirectParametersPipeline,
    /// Builds indirect parameters for non-indexed meshes (frustum culling mode).
    pub gpu_frustum_culling_build_non_indexed_indirect_params: BuildIndirectParametersPipeline,
    /// Occlusion-culling pipelines used by the early prepass build node.
    pub early_phase: PreprocessPhasePipelines,
    /// Occlusion-culling pipelines used by the late prepass build node.
    pub late_phase: PreprocessPhasePipelines,
    /// Occlusion-culling pipelines used by the main-pass build node.
    pub main_phase: PreprocessPhasePipelines,
}
223
/// The set of indirect-parameter pipelines needed by one build phase
/// (early prepass, late prepass, or main).
#[derive(Clone)]
pub struct PreprocessPhasePipelines {
    /// Resets the per-batch-set counters before parameters are rebuilt.
    pub reset_indirect_batch_sets: ResetIndirectBatchSetsPipeline,
    /// Builds indirect parameters for indexed meshes with occlusion culling.
    pub gpu_occlusion_culling_build_indexed_indirect_params: BuildIndirectParametersPipeline,
    /// Builds indirect parameters for non-indexed meshes with occlusion culling.
    pub gpu_occlusion_culling_build_non_indexed_indirect_params: BuildIndirectParametersPipeline,
}
243
/// A specializable compute pipeline that builds mesh uniforms
/// (and optionally culls) on GPU.
pub struct PreprocessPipeline {
    /// The layout of bind group 0 for this pipeline.
    pub bind_group_layout: BindGroupLayoutDescriptor,
    /// The preprocessing compute shader.
    pub shader: Handle<Shader>,
    /// The cached pipeline id; `None` until specialization has been queued.
    pub pipeline_id: Option<CachedComputePipelineId>,
}
255
/// The compute pipeline that resets indirect batch-set counters
/// (`reset_indirect_batch_sets.wgsl`).
#[derive(Clone)]
pub struct ResetIndirectBatchSetsPipeline {
    /// The layout of bind group 0 for this pipeline.
    pub bind_group_layout: BindGroupLayoutDescriptor,
    /// The reset compute shader.
    pub shader: Handle<Shader>,
    /// The cached pipeline id; `None` until specialization has been queued.
    pub pipeline_id: Option<CachedComputePipelineId>,
}
271
/// A specializable compute pipeline that builds indirect draw parameters
/// (`build_indirect_params.wgsl`).
#[derive(Clone)]
pub struct BuildIndirectParametersPipeline {
    /// The layout of bind group 0 for this pipeline.
    pub bind_group_layout: BindGroupLayoutDescriptor,
    /// The indirect-parameter-building compute shader.
    pub shader: Handle<Shader>,
    /// The cached pipeline id; `None` until specialization has been queued.
    pub pipeline_id: Option<CachedComputePipelineId>,
}
284
bitflags! {
    /// Selects which variant of the mesh preprocessing shader to compile
    /// (see `PreprocessPipeline::specialize` for how each flag maps to a
    /// shader def).
    #[derive(Clone, Copy, PartialEq, Eq, Hash)]
    pub struct PreprocessPipelineKey: u8 {
        /// GPU frustum culling is enabled; also turns on indirect drawing
        /// (`INDIRECT` shader def).
        const FRUSTUM_CULLING = 1;
        /// Two-phase GPU occlusion culling is enabled.
        const OCCLUSION_CULLING = 2;
        /// This is the early occlusion-culling phase; without this flag,
        /// occlusion culling compiles the late phase.
        const EARLY_PHASE = 4;
    }

    /// Selects which variant of the indirect-parameter-building shader to
    /// compile.
    #[derive(Clone, Copy, PartialEq, Eq, Hash)]
    pub struct BuildIndirectParametersPipelineKey: u8 {
        /// Parameters are being built for indexed (as opposed to
        /// non-indexed) meshes.
        const INDEXED = 1;
        /// NOTE(review): presumably reflects support for wgpu's
        /// `multi_draw_indirect_count` feature — the flag isn't consumed in
        /// this chunk; confirm at the specialization site.
        const MULTI_DRAW_INDIRECT_COUNT_SUPPORTED = 2;
        /// GPU occlusion culling is enabled.
        const OCCLUSION_CULLING = 4;
        /// Building parameters for the early prepass phase.
        const EARLY_PHASE = 8;
        /// Building parameters for the late prepass phase.
        const LATE_PHASE = 16;
        /// Building parameters for the main phase.
        const MAIN_PHASE = 32;
    }
}
335
/// Per-view component mapping each render phase's `TypeId` to the bind groups
/// the preprocessing passes use for that phase on this view.
#[derive(Component, Clone, Deref, DerefMut)]
pub struct PreprocessBindGroups(pub TypeIdMap<PhasePreprocessBindGroups>);
344
/// The preprocessing bind groups for a single render phase, one variant per
/// culling mode.
#[derive(Clone)]
pub enum PhasePreprocessBindGroups {
    /// The single bind group used when indirect drawing is disabled.
    Direct(BindGroup),

    /// Bind groups for indirect drawing with GPU frustum culling, split by
    /// whether the meshes are indexed. `None` means the phase has no meshes
    /// of that kind.
    IndirectFrustumCulling {
        /// Bind group for indexed meshes, if any.
        indexed: Option<BindGroup>,
        /// Bind group for non-indexed meshes, if any.
        non_indexed: Option<BindGroup>,
    },

    /// Bind groups for two-phase GPU occlusion culling, split by phase and by
    /// whether the meshes are indexed.
    IndirectOcclusionCulling {
        /// Early-phase bind group for indexed meshes, if any.
        early_indexed: Option<BindGroup>,
        /// Early-phase bind group for non-indexed meshes, if any.
        early_non_indexed: Option<BindGroup>,
        /// Late-phase bind group for indexed meshes, if any.
        late_indexed: Option<BindGroup>,
        /// Late-phase bind group for non-indexed meshes, if any.
        late_non_indexed: Option<BindGroup>,
    },
}
390
/// Resource mapping each render phase's `TypeId` to the bind groups used by
/// the indirect-parameter-building passes for that phase.
#[derive(Resource, Default, Deref, DerefMut)]
pub struct BuildIndirectParametersBindGroups(pub TypeIdMap<PhaseBuildIndirectParametersBindGroups>);
398
399impl BuildIndirectParametersBindGroups {
400 pub fn new() -> BuildIndirectParametersBindGroups {
402 Self::default()
403 }
404}
405
/// The indirect-parameter-building bind groups for a single render phase.
/// Each field is `None` when the phase has no work of that kind.
pub struct PhaseBuildIndirectParametersBindGroups {
    /// Resets batch-set counters for indexed meshes.
    reset_indexed_indirect_batch_sets: Option<BindGroup>,
    /// Resets batch-set counters for non-indexed meshes.
    reset_non_indexed_indirect_batch_sets: Option<BindGroup>,
    /// Builds indirect draw parameters for indexed meshes.
    build_indexed_indirect: Option<BindGroup>,
    /// Builds indirect draw parameters for non-indexed meshes.
    build_non_indexed_indirect: Option<BindGroup>,
}
422
/// Marker component that exempts a view from GPU preprocessing: every view
/// query in this module filters on `Without<SkipGpuPreprocess>`.
#[derive(Component, Default)]
pub struct SkipGpuPreprocess;
427
impl Plugin for GpuMeshPreprocessPlugin {
    fn build(&self, app: &mut App) {
        // Register the three compute shaders this module dispatches.
        embedded_asset!(app, "mesh_preprocess.wgsl");
        embedded_asset!(app, "reset_indirect_batch_sets.wgsl");
        embedded_asset!(app, "build_indirect_params.wgsl");
    }

    fn finish(&self, app: &mut App) {
        let Some(render_app) = app.get_sub_app_mut(RenderApp) else {
            return;
        };

        // Skip everything below when GPU instance buffer building is disabled
        // or the platform can't support preprocessing at all.
        let gpu_preprocessing_support = render_app.world().resource::<GpuPreprocessingSupport>();
        if !self.use_gpu_instance_buffer_builder || !gpu_preprocessing_support.is_available() {
            return;
        }

        render_app
            .init_resource::<PreprocessPipelines>()
            .init_resource::<SpecializedComputePipelines<PreprocessPipeline>>()
            .init_resource::<SpecializedComputePipelines<ResetIndirectBatchSetsPipeline>>()
            .init_resource::<SpecializedComputePipelines<BuildIndirectParametersPipeline>>()
            .add_systems(
                Render,
                (
                    prepare_preprocess_pipelines.in_set(RenderSystems::Prepare),
                    // Bind groups can only be prepared once the batched
                    // instance buffers resource exists.
                    prepare_preprocess_bind_groups
                        .run_if(resource_exists::<BatchedInstanceBuffers<
                            MeshUniform,
                            MeshInputUniform
                        >>)
                        .in_set(RenderSystems::PrepareBindGroups),
                    write_mesh_culling_data_buffer.in_set(RenderSystems::PrepareResourcesFlush),
                ),
            )
            .add_render_graph_node::<ClearIndirectParametersMetadataNode>(
                Core3d,
                NodePbr::ClearIndirectParametersMetadata
            )
            .add_render_graph_node::<EarlyGpuPreprocessNode>(Core3d, NodePbr::EarlyGpuPreprocess)
            .add_render_graph_node::<LateGpuPreprocessNode>(Core3d, NodePbr::LateGpuPreprocess)
            .add_render_graph_node::<EarlyPrepassBuildIndirectParametersNode>(
                Core3d,
                NodePbr::EarlyPrepassBuildIndirectParameters,
            )
            .add_render_graph_node::<LatePrepassBuildIndirectParametersNode>(
                Core3d,
                NodePbr::LatePrepassBuildIndirectParameters,
            )
            .add_render_graph_node::<MainBuildIndirectParametersNode>(
                Core3d,
                NodePbr::MainBuildIndirectParameters,
            )
            // Main ordering chain: clear metadata, then the early
            // preprocess/build/prepass sequence, the depth downsample, the
            // late sequence, and finally the main pass.
            .add_render_graph_edges(
                Core3d,
                (
                    NodePbr::ClearIndirectParametersMetadata,
                    NodePbr::EarlyGpuPreprocess,
                    NodePbr::EarlyPrepassBuildIndirectParameters,
                    Node3d::EarlyPrepass,
                    Node3d::EarlyDeferredPrepass,
                    Node3d::EarlyDownsampleDepth,
                    NodePbr::LateGpuPreprocess,
                    NodePbr::LatePrepassBuildIndirectParameters,
                    Node3d::LatePrepass,
                    Node3d::LateDeferredPrepass,
                    NodePbr::MainBuildIndirectParameters,
                    Node3d::StartMainPass,
                ),
            // Extra edges ordering the shadow passes relative to the
            // indirect-parameter builds.
            ).add_render_graph_edges(
                Core3d,
                (
                    NodePbr::EarlyPrepassBuildIndirectParameters,
                    NodePbr::EarlyShadowPass,
                    Node3d::EarlyDownsampleDepth,
                )
            ).add_render_graph_edges(
                Core3d,
                (
                    NodePbr::LatePrepassBuildIndirectParameters,
                    NodePbr::LateShadowPass,
                    NodePbr::MainBuildIndirectParameters,
                )
            );
    }
}
516
impl Node for ClearIndirectParametersMetadataNode {
    fn run<'w>(
        &self,
        _: &mut RenderGraphContext,
        render_context: &mut RenderContext<'w>,
        world: &'w World,
    ) -> Result<(), NodeRunError> {
        // Nothing to clear if indirect drawing isn't in use this frame.
        let Some(indirect_parameters_buffers) = world.get_resource::<IndirectParametersBuffers>()
        else {
            return Ok(());
        };

        // Zero the GPU metadata for every phase so this frame's preprocessing
        // starts from a clean slate. Indexed and non-indexed metadata live in
        // separate buffers and are cleared independently.
        for phase_indirect_parameters_buffers in indirect_parameters_buffers.values() {
            if let Some(indexed_gpu_metadata_buffer) = phase_indirect_parameters_buffers
                .indexed
                .gpu_metadata_buffer()
            {
                // Clear only the in-use prefix: batch count × metadata stride.
                render_context.command_encoder().clear_buffer(
                    indexed_gpu_metadata_buffer,
                    0,
                    Some(
                        phase_indirect_parameters_buffers.indexed.batch_count() as u64
                            * size_of::<IndirectParametersGpuMetadata>() as u64,
                    ),
                );
            }

            if let Some(non_indexed_gpu_metadata_buffer) = phase_indirect_parameters_buffers
                .non_indexed
                .gpu_metadata_buffer()
            {
                render_context.command_encoder().clear_buffer(
                    non_indexed_gpu_metadata_buffer,
                    0,
                    Some(
                        phase_indirect_parameters_buffers.non_indexed.batch_count() as u64
                            * size_of::<IndirectParametersGpuMetadata>() as u64,
                    ),
                );
            }
        }

        Ok(())
    }
}
563
564impl FromWorld for EarlyGpuPreprocessNode {
565 fn from_world(world: &mut World) -> Self {
566 Self {
567 view_query: QueryState::new(world),
568 main_view_query: QueryState::new(world),
569 }
570 }
571}
572
impl Node for EarlyGpuPreprocessNode {
    fn update(&mut self, world: &mut World) {
        // Refresh the cached query states so `get_manual` in `run` sees any
        // newly-created archetypes.
        self.view_query.update_archetypes(world);
        self.main_view_query.update_archetypes(world);
    }

    fn run<'w>(
        &self,
        graph: &mut RenderGraphContext,
        render_context: &mut RenderContext<'w>,
        world: &'w World,
    ) -> Result<(), NodeRunError> {
        let diagnostics = render_context.diagnostic_recorder();

        let batched_instance_buffers =
            world.resource::<BatchedInstanceBuffers<MeshUniform, MeshInputUniform>>();

        let pipeline_cache = world.resource::<PipelineCache>();
        let preprocess_pipelines = world.resource::<PreprocessPipelines>();

        let mut compute_pass =
            render_context
                .command_encoder()
                .begin_compute_pass(&ComputePassDescriptor {
                    label: Some("early_mesh_preprocessing"),
                    timestamp_writes: None,
                });
        let pass_span = diagnostics.pass_span(&mut compute_pass, "early_mesh_preprocessing");

        // Process the graph's view plus all light views attached to it, so
        // shadow views share this compute pass.
        let mut all_views: SmallVec<[_; 8]> = SmallVec::new();
        all_views.push(graph.view_entity());
        if let Ok(shadow_cascade_views) =
            self.main_view_query.get_manual(world, graph.view_entity())
        {
            all_views.extend(shadow_cascade_views.lights.iter().copied());
        }

        for view_entity in all_views {
            let Ok((
                view,
                bind_groups,
                view_uniform_offset,
                no_indirect_drawing,
                occlusion_culling,
            )) = self.view_query.get_manual(world, view_entity)
            else {
                continue;
            };

            // Skip views whose resources haven't been prepared yet.
            let Some(bind_groups) = bind_groups else {
                continue;
            };
            let Some(view_uniform_offset) = view_uniform_offset else {
                continue;
            };

            // Select the pipeline variant matching this view's culling mode:
            // direct, early occlusion culling, or plain frustum culling.
            let maybe_pipeline_id = if no_indirect_drawing {
                preprocess_pipelines.direct_preprocess.pipeline_id
            } else if occlusion_culling {
                preprocess_pipelines
                    .early_gpu_occlusion_culling_preprocess
                    .pipeline_id
            } else {
                preprocess_pipelines
                    .gpu_frustum_culling_preprocess
                    .pipeline_id
            };

            let Some(preprocess_pipeline_id) = maybe_pipeline_id else {
                warn!("The build mesh uniforms pipeline wasn't ready");
                continue;
            };

            let Some(preprocess_pipeline) =
                pipeline_cache.get_compute_pipeline(preprocess_pipeline_id)
            else {
                // The pipeline is still compiling; skip this view for now.
                continue;
            };

            compute_pass.set_pipeline(preprocess_pipeline);

            // One dispatch group per render phase that has work for this view.
            for (phase_type_id, batched_phase_instance_buffers) in
                &batched_instance_buffers.phase_instance_buffers
            {
                let Some(work_item_buffers) = batched_phase_instance_buffers
                    .work_item_buffers
                    .get(&view.retained_view_entity)
                else {
                    continue;
                };

                let Some(phase_bind_groups) = bind_groups.get(phase_type_id) else {
                    continue;
                };

                // Dynamic offset selecting this view's slot in the view
                // uniform buffer.
                let dynamic_offsets = [view_uniform_offset.offset];

                match *phase_bind_groups {
                    PhasePreprocessBindGroups::Direct(ref bind_group) => {
                        // Direct bind groups only pair with direct work item
                        // buffers; a mismatch means stale data, so skip.
                        let PreprocessWorkItemBuffers::Direct(work_item_buffer) = work_item_buffers
                        else {
                            continue;
                        };
                        compute_pass.set_bind_group(0, bind_group, &dynamic_offsets);
                        // One thread per work item, rounded up to whole
                        // workgroups.
                        let workgroup_count = work_item_buffer.len().div_ceil(WORKGROUP_SIZE);
                        if workgroup_count > 0 {
                            compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1);
                        }
                    }

                    // Frustum culling and the early occlusion-culling phase
                    // share the same dispatch shape, so they share this arm;
                    // only the early bind groups of the occlusion variant are
                    // used here.
                    PhasePreprocessBindGroups::IndirectFrustumCulling {
                        indexed: ref maybe_indexed_bind_group,
                        non_indexed: ref maybe_non_indexed_bind_group,
                    }
                    | PhasePreprocessBindGroups::IndirectOcclusionCulling {
                        early_indexed: ref maybe_indexed_bind_group,
                        early_non_indexed: ref maybe_non_indexed_bind_group,
                        ..
                    } => {
                        let PreprocessWorkItemBuffers::Indirect {
                            indexed: indexed_buffer,
                            non_indexed: non_indexed_buffer,
                            ..
                        } = work_item_buffers
                        else {
                            continue;
                        };

                        if let Some(indexed_bind_group) = maybe_indexed_bind_group {
                            // With occlusion culling, pass the offset of the
                            // late-phase indirect parameters to the shader as
                            // a push constant.
                            if let PreprocessWorkItemBuffers::Indirect {
                                gpu_occlusion_culling:
                                    Some(GpuOcclusionCullingWorkItemBuffers {
                                        late_indirect_parameters_indexed_offset,
                                        ..
                                    }),
                                ..
                            } = *work_item_buffers
                            {
                                compute_pass.set_push_constants(
                                    0,
                                    bytemuck::bytes_of(&late_indirect_parameters_indexed_offset),
                                );
                            }

                            compute_pass.set_bind_group(0, indexed_bind_group, &dynamic_offsets);
                            let workgroup_count = indexed_buffer.len().div_ceil(WORKGROUP_SIZE);
                            if workgroup_count > 0 {
                                compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1);
                            }
                        }

                        if let Some(non_indexed_bind_group) = maybe_non_indexed_bind_group {
                            if let PreprocessWorkItemBuffers::Indirect {
                                gpu_occlusion_culling:
                                    Some(GpuOcclusionCullingWorkItemBuffers {
                                        late_indirect_parameters_non_indexed_offset,
                                        ..
                                    }),
                                ..
                            } = *work_item_buffers
                            {
                                compute_pass.set_push_constants(
                                    0,
                                    bytemuck::bytes_of(
                                        &late_indirect_parameters_non_indexed_offset,
                                    ),
                                );
                            }

                            compute_pass.set_bind_group(
                                0,
                                non_indexed_bind_group,
                                &dynamic_offsets,
                            );
                            let workgroup_count = non_indexed_buffer.len().div_ceil(WORKGROUP_SIZE);
                            if workgroup_count > 0 {
                                compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1);
                            }
                        }
                    }
                }
            }
        }

        pass_span.end(&mut compute_pass);

        Ok(())
    }
}
782
783impl FromWorld for EarlyPrepassBuildIndirectParametersNode {
784 fn from_world(world: &mut World) -> Self {
785 Self {
786 view_query: QueryState::new(world),
787 }
788 }
789}
790
791impl FromWorld for LatePrepassBuildIndirectParametersNode {
792 fn from_world(world: &mut World) -> Self {
793 Self {
794 view_query: QueryState::new(world),
795 }
796 }
797}
798
799impl FromWorld for MainBuildIndirectParametersNode {
800 fn from_world(world: &mut World) -> Self {
801 Self {
802 view_query: QueryState::new(world),
803 }
804 }
805}
806
807impl FromWorld for LateGpuPreprocessNode {
808 fn from_world(world: &mut World) -> Self {
809 Self {
810 view_query: QueryState::new(world),
811 }
812 }
813}
814
impl Node for LateGpuPreprocessNode {
    fn update(&mut self, world: &mut World) {
        // Refresh the cached query state so `iter_manual` in `run` sees any
        // newly-created archetypes.
        self.view_query.update_archetypes(world);
    }

    fn run<'w>(
        &self,
        _: &mut RenderGraphContext,
        render_context: &mut RenderContext<'w>,
        world: &'w World,
    ) -> Result<(), NodeRunError> {
        let diagnostics = render_context.diagnostic_recorder();

        let batched_instance_buffers =
            world.resource::<BatchedInstanceBuffers<MeshUniform, MeshInputUniform>>();

        let pipeline_cache = world.resource::<PipelineCache>();
        let preprocess_pipelines = world.resource::<PreprocessPipelines>();

        // The late pass uses exactly one pipeline: the late occlusion-culling
        // preprocess variant.
        let maybe_pipeline_id = preprocess_pipelines
            .late_gpu_occlusion_culling_preprocess
            .pipeline_id;

        let Some(preprocess_pipeline_id) = maybe_pipeline_id else {
            warn!("The build mesh uniforms pipeline wasn't ready");
            return Ok(());
        };

        let Some(preprocess_pipeline) = pipeline_cache.get_compute_pipeline(preprocess_pipeline_id)
        else {
            // The pipeline is still compiling; do nothing this frame.
            return Ok(());
        };

        let mut compute_pass =
            render_context
                .command_encoder()
                .begin_compute_pass(&ComputePassDescriptor {
                    label: Some("late_mesh_preprocessing"),
                    timestamp_writes: None,
                });

        let pass_span = diagnostics.pass_span(&mut compute_pass, "late_mesh_preprocessing");

        for (view, bind_groups, view_uniform_offset) in self.view_query.iter_manual(world) {
            compute_pass.set_pipeline(preprocess_pipeline);

            for (phase_type_id, batched_phase_instance_buffers) in
                &batched_instance_buffers.phase_instance_buffers
            {
                let UntypedPhaseBatchedInstanceBuffers {
                    ref work_item_buffers,
                    ref late_indexed_indirect_parameters_buffer,
                    ref late_non_indexed_indirect_parameters_buffer,
                    ..
                } = *batched_phase_instance_buffers;

                let Some(phase_work_item_buffers) =
                    work_item_buffers.get(&view.retained_view_entity)
                else {
                    continue;
                };

                // The late phase only runs when occlusion-culling work item
                // buffers, occlusion-culling bind groups, and both late
                // indirect parameter buffers all exist for this phase.
                let (
                    PreprocessWorkItemBuffers::Indirect {
                        gpu_occlusion_culling:
                            Some(GpuOcclusionCullingWorkItemBuffers {
                                late_indirect_parameters_indexed_offset,
                                late_indirect_parameters_non_indexed_offset,
                                ..
                            }),
                        ..
                    },
                    Some(PhasePreprocessBindGroups::IndirectOcclusionCulling {
                        late_indexed: maybe_late_indexed_bind_group,
                        late_non_indexed: maybe_late_non_indexed_bind_group,
                        ..
                    }),
                    Some(late_indexed_indirect_parameters_buffer),
                    Some(late_non_indexed_indirect_parameters_buffer),
                ) = (
                    phase_work_item_buffers,
                    bind_groups.get(phase_type_id),
                    late_indexed_indirect_parameters_buffer.buffer(),
                    late_non_indexed_indirect_parameters_buffer.buffer(),
                )
                else {
                    continue;
                };

                // Dynamic offset selecting this view's slot in the view
                // uniform buffer.
                let mut dynamic_offsets: SmallVec<[u32; 1]> = smallvec![];
                dynamic_offsets.push(view_uniform_offset.offset);

                if let Some(late_indexed_bind_group) = maybe_late_indexed_bind_group {
                    // Tell the shader where its late indirect parameters
                    // start via a push constant.
                    compute_pass.set_push_constants(
                        0,
                        bytemuck::bytes_of(late_indirect_parameters_indexed_offset),
                    );

                    compute_pass.set_bind_group(0, late_indexed_bind_group, &dynamic_offsets);
                    // Workgroup counts live in a GPU buffer (presumably
                    // written by the early phase), so dispatch indirectly,
                    // indexing by element offset × element stride.
                    compute_pass.dispatch_workgroups_indirect(
                        late_indexed_indirect_parameters_buffer,
                        (*late_indirect_parameters_indexed_offset as u64)
                            * (size_of::<LatePreprocessWorkItemIndirectParameters>() as u64),
                    );
                }

                if let Some(late_non_indexed_bind_group) = maybe_late_non_indexed_bind_group {
                    compute_pass.set_push_constants(
                        0,
                        bytemuck::bytes_of(late_indirect_parameters_non_indexed_offset),
                    );

                    compute_pass.set_bind_group(0, late_non_indexed_bind_group, &dynamic_offsets);
                    compute_pass.dispatch_workgroups_indirect(
                        late_non_indexed_indirect_parameters_buffer,
                        (*late_indirect_parameters_non_indexed_offset as u64)
                            * (size_of::<LatePreprocessWorkItemIndirectParameters>() as u64),
                    );
                }
            }
        }

        pass_span.end(&mut compute_pass);

        Ok(())
    }
}
956
957impl Node for EarlyPrepassBuildIndirectParametersNode {
958 fn update(&mut self, world: &mut World) {
959 self.view_query.update_archetypes(world);
960 }
961
962 fn run<'w>(
963 &self,
964 _: &mut RenderGraphContext,
965 render_context: &mut RenderContext<'w>,
966 world: &'w World,
967 ) -> Result<(), NodeRunError> {
968 let preprocess_pipelines = world.resource::<PreprocessPipelines>();
969
970 if self.view_query.iter_manual(world).next().is_none() {
973 return Ok(());
974 }
975
976 run_build_indirect_parameters_node(
977 render_context,
978 world,
979 &preprocess_pipelines.early_phase,
980 "early_prepass_indirect_parameters_building",
981 )
982 }
983}
984
985impl Node for LatePrepassBuildIndirectParametersNode {
986 fn update(&mut self, world: &mut World) {
987 self.view_query.update_archetypes(world);
988 }
989
990 fn run<'w>(
991 &self,
992 _: &mut RenderGraphContext,
993 render_context: &mut RenderContext<'w>,
994 world: &'w World,
995 ) -> Result<(), NodeRunError> {
996 let preprocess_pipelines = world.resource::<PreprocessPipelines>();
997
998 if self.view_query.iter_manual(world).next().is_none() {
1001 return Ok(());
1002 }
1003
1004 run_build_indirect_parameters_node(
1005 render_context,
1006 world,
1007 &preprocess_pipelines.late_phase,
1008 "late_prepass_indirect_parameters_building",
1009 )
1010 }
1011}
1012
1013impl Node for MainBuildIndirectParametersNode {
1014 fn update(&mut self, world: &mut World) {
1015 self.view_query.update_archetypes(world);
1016 }
1017
1018 fn run<'w>(
1019 &self,
1020 _: &mut RenderGraphContext,
1021 render_context: &mut RenderContext<'w>,
1022 world: &'w World,
1023 ) -> Result<(), NodeRunError> {
1024 let preprocess_pipelines = world.resource::<PreprocessPipelines>();
1025
1026 run_build_indirect_parameters_node(
1027 render_context,
1028 world,
1029 &preprocess_pipelines.main_phase,
1030 "main_indirect_parameters_building",
1031 )
1032 }
1033}
1034
/// Shared body of the three build-indirect-parameters render nodes.
///
/// Encodes one compute pass (labeled `label`) that, for every render phase
/// with prepared bind groups, resets the indirect batch-set counters and then
/// rebuilds the indirect draw parameters, separately for indexed and
/// non-indexed meshes. Returns `Ok(())` unconditionally: missing resources or
/// unready pipelines simply skip the work.
fn run_build_indirect_parameters_node(
    render_context: &mut RenderContext,
    world: &World,
    preprocess_phase_pipelines: &PreprocessPhasePipelines,
    label: &'static str,
) -> Result<(), NodeRunError> {
    let Some(build_indirect_params_bind_groups) =
        world.get_resource::<BuildIndirectParametersBindGroups>()
    else {
        return Ok(());
    };

    let diagnostics = render_context.diagnostic_recorder();

    let pipeline_cache = world.resource::<PipelineCache>();
    let indirect_parameters_buffers = world.resource::<IndirectParametersBuffers>();

    let mut compute_pass =
        render_context
            .command_encoder()
            .begin_compute_pass(&ComputePassDescriptor {
                label: Some(label),
                timestamp_writes: None,
            });
    let pass_span = diagnostics.pass_span(&mut compute_pass, label);

    // All three pipelines must have been queued for specialization…
    let (
        Some(reset_indirect_batch_sets_pipeline_id),
        Some(build_indexed_indirect_params_pipeline_id),
        Some(build_non_indexed_indirect_params_pipeline_id),
    ) = (
        preprocess_phase_pipelines
            .reset_indirect_batch_sets
            .pipeline_id,
        preprocess_phase_pipelines
            .gpu_occlusion_culling_build_indexed_indirect_params
            .pipeline_id,
        preprocess_phase_pipelines
            .gpu_occlusion_culling_build_non_indexed_indirect_params
            .pipeline_id,
    )
    else {
        warn!("The build indirect parameters pipelines weren't ready");
        pass_span.end(&mut compute_pass);
        return Ok(());
    };

    // …and must have finished compiling; otherwise bail (closing the
    // diagnostics span first).
    let (
        Some(reset_indirect_batch_sets_pipeline),
        Some(build_indexed_indirect_params_pipeline),
        Some(build_non_indexed_indirect_params_pipeline),
    ) = (
        pipeline_cache.get_compute_pipeline(reset_indirect_batch_sets_pipeline_id),
        pipeline_cache.get_compute_pipeline(build_indexed_indirect_params_pipeline_id),
        pipeline_cache.get_compute_pipeline(build_non_indexed_indirect_params_pipeline_id),
    )
    else {
        pass_span.end(&mut compute_pass);
        return Ok(());
    };

    for (phase_type_id, phase_build_indirect_params_bind_groups) in
        build_indirect_params_bind_groups.iter()
    {
        let Some(phase_indirect_parameters_buffers) =
            indirect_parameters_buffers.get(phase_type_id)
        else {
            continue;
        };

        // Indexed meshes: reset the batch-set counters, then rebuild the
        // indirect parameters. Both bind groups must be present.
        if let (
            Some(reset_indexed_indirect_batch_sets_bind_group),
            Some(build_indirect_indexed_params_bind_group),
        ) = (
            &phase_build_indirect_params_bind_groups.reset_indexed_indirect_batch_sets,
            &phase_build_indirect_params_bind_groups.build_indexed_indirect,
        ) {
            compute_pass.set_pipeline(reset_indirect_batch_sets_pipeline);
            compute_pass.set_bind_group(0, reset_indexed_indirect_batch_sets_bind_group, &[]);
            // One thread per batch set, rounded up to whole workgroups.
            let workgroup_count = phase_indirect_parameters_buffers
                .batch_set_count(true)
                .div_ceil(WORKGROUP_SIZE);
            if workgroup_count > 0 {
                compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1);
            }

            compute_pass.set_pipeline(build_indexed_indirect_params_pipeline);
            compute_pass.set_bind_group(0, build_indirect_indexed_params_bind_group, &[]);
            // One thread per batch, rounded up to whole workgroups.
            let workgroup_count = phase_indirect_parameters_buffers
                .indexed
                .batch_count()
                .div_ceil(WORKGROUP_SIZE);
            if workgroup_count > 0 {
                compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1);
            }
        }

        // Non-indexed meshes: same two steps with the non-indexed bind groups
        // and counts.
        if let (
            Some(reset_non_indexed_indirect_batch_sets_bind_group),
            Some(build_indirect_non_indexed_params_bind_group),
        ) = (
            &phase_build_indirect_params_bind_groups.reset_non_indexed_indirect_batch_sets,
            &phase_build_indirect_params_bind_groups.build_non_indexed_indirect,
        ) {
            compute_pass.set_pipeline(reset_indirect_batch_sets_pipeline);
            compute_pass.set_bind_group(0, reset_non_indexed_indirect_batch_sets_bind_group, &[]);
            let workgroup_count = phase_indirect_parameters_buffers
                .batch_set_count(false)
                .div_ceil(WORKGROUP_SIZE);
            if workgroup_count > 0 {
                compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1);
            }

            compute_pass.set_pipeline(build_non_indexed_indirect_params_pipeline);
            compute_pass.set_bind_group(0, build_indirect_non_indexed_params_bind_group, &[]);
            let workgroup_count = phase_indirect_parameters_buffers
                .non_indexed
                .batch_count()
                .div_ceil(WORKGROUP_SIZE);
            if workgroup_count > 0 {
                compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1);
            }
        }
    }

    pass_span.end(&mut compute_pass);

    Ok(())
}
1170
1171impl PreprocessPipelines {
1172 pub(crate) fn pipelines_are_loaded(
1175 &self,
1176 pipeline_cache: &PipelineCache,
1177 preprocessing_support: &GpuPreprocessingSupport,
1178 ) -> bool {
1179 match preprocessing_support.max_supported_mode {
1180 GpuPreprocessingMode::None => false,
1181 GpuPreprocessingMode::PreprocessingOnly => {
1182 self.direct_preprocess.is_loaded(pipeline_cache)
1183 && self
1184 .gpu_frustum_culling_preprocess
1185 .is_loaded(pipeline_cache)
1186 }
1187 GpuPreprocessingMode::Culling => {
1188 self.direct_preprocess.is_loaded(pipeline_cache)
1189 && self
1190 .gpu_frustum_culling_preprocess
1191 .is_loaded(pipeline_cache)
1192 && self
1193 .early_gpu_occlusion_culling_preprocess
1194 .is_loaded(pipeline_cache)
1195 && self
1196 .late_gpu_occlusion_culling_preprocess
1197 .is_loaded(pipeline_cache)
1198 && self
1199 .gpu_frustum_culling_build_indexed_indirect_params
1200 .is_loaded(pipeline_cache)
1201 && self
1202 .gpu_frustum_culling_build_non_indexed_indirect_params
1203 .is_loaded(pipeline_cache)
1204 && self.early_phase.is_loaded(pipeline_cache)
1205 && self.late_phase.is_loaded(pipeline_cache)
1206 && self.main_phase.is_loaded(pipeline_cache)
1207 }
1208 }
1209 }
1210}
1211
1212impl PreprocessPhasePipelines {
1213 fn is_loaded(&self, pipeline_cache: &PipelineCache) -> bool {
1214 self.reset_indirect_batch_sets.is_loaded(pipeline_cache)
1215 && self
1216 .gpu_occlusion_culling_build_indexed_indirect_params
1217 .is_loaded(pipeline_cache)
1218 && self
1219 .gpu_occlusion_culling_build_non_indexed_indirect_params
1220 .is_loaded(pipeline_cache)
1221 }
1222}
1223
1224impl PreprocessPipeline {
1225 fn is_loaded(&self, pipeline_cache: &PipelineCache) -> bool {
1226 self.pipeline_id
1227 .is_some_and(|pipeline_id| pipeline_cache.get_compute_pipeline(pipeline_id).is_some())
1228 }
1229}
1230
1231impl ResetIndirectBatchSetsPipeline {
1232 fn is_loaded(&self, pipeline_cache: &PipelineCache) -> bool {
1233 self.pipeline_id
1234 .is_some_and(|pipeline_id| pipeline_cache.get_compute_pipeline(pipeline_id).is_some())
1235 }
1236}
1237
1238impl BuildIndirectParametersPipeline {
1239 fn is_loaded(&self, pipeline_cache: &PipelineCache) -> bool {
1242 self.pipeline_id
1243 .is_some_and(|pipeline_id| pipeline_cache.get_compute_pipeline(pipeline_id).is_some())
1244 }
1245}
1246
1247impl SpecializedComputePipeline for PreprocessPipeline {
1248 type Key = PreprocessPipelineKey;
1249
1250 fn specialize(&self, key: Self::Key) -> ComputePipelineDescriptor {
1251 let mut shader_defs = vec!["WRITE_INDIRECT_PARAMETERS_METADATA".into()];
1252 if key.contains(PreprocessPipelineKey::FRUSTUM_CULLING) {
1253 shader_defs.push("INDIRECT".into());
1254 shader_defs.push("FRUSTUM_CULLING".into());
1255 }
1256 if key.contains(PreprocessPipelineKey::OCCLUSION_CULLING) {
1257 shader_defs.push("OCCLUSION_CULLING".into());
1258 if key.contains(PreprocessPipelineKey::EARLY_PHASE) {
1259 shader_defs.push("EARLY_PHASE".into());
1260 } else {
1261 shader_defs.push("LATE_PHASE".into());
1262 }
1263 }
1264
1265 ComputePipelineDescriptor {
1266 label: Some(
1267 format!(
1268 "mesh preprocessing ({})",
1269 if key.contains(
1270 PreprocessPipelineKey::OCCLUSION_CULLING
1271 | PreprocessPipelineKey::EARLY_PHASE
1272 ) {
1273 "early GPU occlusion culling"
1274 } else if key.contains(PreprocessPipelineKey::OCCLUSION_CULLING) {
1275 "late GPU occlusion culling"
1276 } else if key.contains(PreprocessPipelineKey::FRUSTUM_CULLING) {
1277 "GPU frustum culling"
1278 } else {
1279 "direct"
1280 }
1281 )
1282 .into(),
1283 ),
1284 layout: vec![self.bind_group_layout.clone()],
1285 push_constant_ranges: if key.contains(PreprocessPipelineKey::OCCLUSION_CULLING) {
1286 vec![PushConstantRange {
1287 stages: ShaderStages::COMPUTE,
1288 range: 0..4,
1289 }]
1290 } else {
1291 vec![]
1292 },
1293 shader: self.shader.clone(),
1294 shader_defs,
1295 ..default()
1296 }
1297 }
1298}
1299
impl FromWorld for PreprocessPipelines {
    fn from_world(world: &mut World) -> Self {
        // Build the bind group layout entry lists for each variant of the mesh
        // preprocessing shader, from least to most capable.
        let direct_bind_group_layout_entries = preprocess_direct_bind_group_layout_entries();
        let gpu_frustum_culling_bind_group_layout_entries = gpu_culling_bind_group_layout_entries();
        // The early occlusion culling variant additionally writes work items
        // for the late pass to binding 11.
        let gpu_early_occlusion_culling_bind_group_layout_entries =
            gpu_occlusion_culling_bind_group_layout_entries().extend_with_indices(((
                11,
                storage_buffer::<PreprocessWorkItem>(false),
            ),));
        let gpu_late_occlusion_culling_bind_group_layout_entries =
            gpu_occlusion_culling_bind_group_layout_entries();

        // The batch set reset shader only needs the batch sets themselves.
        let reset_indirect_batch_sets_bind_group_layout_entries =
            DynamicBindGroupLayoutEntries::sequential(
                ShaderStages::COMPUTE,
                (storage_buffer::<IndirectBatchSet>(false),),
            );

        // The two indirect-parameter building layouts differ only in the type
        // of the output draw parameters (indexed vs. non-indexed).
        let build_indexed_indirect_params_bind_group_layout_entries =
            build_indirect_params_bind_group_layout_entries()
                .extend_sequential((storage_buffer::<IndirectParametersIndexed>(false),));
        let build_non_indexed_indirect_params_bind_group_layout_entries =
            build_indirect_params_bind_group_layout_entries()
                .extend_sequential((storage_buffer::<IndirectParametersNonIndexed>(false),));

        // Wrap each entry list in a labeled bind group layout descriptor.
        let direct_bind_group_layout = BindGroupLayoutDescriptor::new(
            "build mesh uniforms direct bind group layout",
            &direct_bind_group_layout_entries,
        );
        let gpu_frustum_culling_bind_group_layout = BindGroupLayoutDescriptor::new(
            "build mesh uniforms GPU frustum culling bind group layout",
            &gpu_frustum_culling_bind_group_layout_entries,
        );
        let gpu_early_occlusion_culling_bind_group_layout = BindGroupLayoutDescriptor::new(
            "build mesh uniforms GPU early occlusion culling bind group layout",
            &gpu_early_occlusion_culling_bind_group_layout_entries,
        );
        let gpu_late_occlusion_culling_bind_group_layout = BindGroupLayoutDescriptor::new(
            "build mesh uniforms GPU late occlusion culling bind group layout",
            &gpu_late_occlusion_culling_bind_group_layout_entries,
        );
        let reset_indirect_batch_sets_bind_group_layout = BindGroupLayoutDescriptor::new(
            "reset indirect batch sets bind group layout",
            &reset_indirect_batch_sets_bind_group_layout_entries,
        );
        let build_indexed_indirect_params_bind_group_layout = BindGroupLayoutDescriptor::new(
            "build indexed indirect parameters bind group layout",
            &build_indexed_indirect_params_bind_group_layout_entries,
        );
        let build_non_indexed_indirect_params_bind_group_layout = BindGroupLayoutDescriptor::new(
            "build non-indexed indirect parameters bind group layout",
            &build_non_indexed_indirect_params_bind_group_layout_entries,
        );

        // Load the three embedded compute shaders.
        let preprocess_shader = load_embedded_asset!(world, "mesh_preprocess.wgsl");
        let reset_indirect_batch_sets_shader =
            load_embedded_asset!(world, "reset_indirect_batch_sets.wgsl");
        let build_indirect_params_shader =
            load_embedded_asset!(world, "build_indirect_params.wgsl");

        // The early, late, and main phases share identical pipeline
        // configurations; this template is cloned for each phase below.
        // Pipeline IDs start as `None` and are filled in later by
        // `prepare_preprocess_pipelines`.
        let preprocess_phase_pipelines = PreprocessPhasePipelines {
            reset_indirect_batch_sets: ResetIndirectBatchSetsPipeline {
                bind_group_layout: reset_indirect_batch_sets_bind_group_layout.clone(),
                shader: reset_indirect_batch_sets_shader,
                pipeline_id: None,
            },
            gpu_occlusion_culling_build_indexed_indirect_params: BuildIndirectParametersPipeline {
                bind_group_layout: build_indexed_indirect_params_bind_group_layout.clone(),
                shader: build_indirect_params_shader.clone(),
                pipeline_id: None,
            },
            gpu_occlusion_culling_build_non_indexed_indirect_params:
                BuildIndirectParametersPipeline {
                    bind_group_layout: build_non_indexed_indirect_params_bind_group_layout.clone(),
                    shader: build_indirect_params_shader.clone(),
                    pipeline_id: None,
                },
        };

        PreprocessPipelines {
            direct_preprocess: PreprocessPipeline {
                bind_group_layout: direct_bind_group_layout,
                shader: preprocess_shader.clone(),
                pipeline_id: None,
            },
            gpu_frustum_culling_preprocess: PreprocessPipeline {
                bind_group_layout: gpu_frustum_culling_bind_group_layout,
                shader: preprocess_shader.clone(),
                pipeline_id: None,
            },
            early_gpu_occlusion_culling_preprocess: PreprocessPipeline {
                bind_group_layout: gpu_early_occlusion_culling_bind_group_layout,
                shader: preprocess_shader.clone(),
                pipeline_id: None,
            },
            late_gpu_occlusion_culling_preprocess: PreprocessPipeline {
                bind_group_layout: gpu_late_occlusion_culling_bind_group_layout,
                shader: preprocess_shader,
                pipeline_id: None,
            },
            gpu_frustum_culling_build_indexed_indirect_params: BuildIndirectParametersPipeline {
                bind_group_layout: build_indexed_indirect_params_bind_group_layout.clone(),
                shader: build_indirect_params_shader.clone(),
                pipeline_id: None,
            },
            gpu_frustum_culling_build_non_indexed_indirect_params:
                BuildIndirectParametersPipeline {
                    bind_group_layout: build_non_indexed_indirect_params_bind_group_layout.clone(),
                    shader: build_indirect_params_shader,
                    pipeline_id: None,
                },
            early_phase: preprocess_phase_pipelines.clone(),
            late_phase: preprocess_phase_pipelines.clone(),
            main_phase: preprocess_phase_pipelines.clone(),
        }
    }
}
1422
1423fn preprocess_direct_bind_group_layout_entries() -> DynamicBindGroupLayoutEntries {
1424 DynamicBindGroupLayoutEntries::new_with_indices(
1425 ShaderStages::COMPUTE,
1426 (
1427 (
1429 0,
1430 uniform_buffer::<ViewUniform>(true),
1431 ),
1432 (3, storage_buffer_read_only::<MeshInputUniform>(false)),
1434 (4, storage_buffer_read_only::<MeshInputUniform>(false)),
1436 (5, storage_buffer_read_only::<PreprocessWorkItem>(false)),
1438 (6, storage_buffer::<MeshUniform>(false)),
1440 ),
1441 )
1442}
1443
1444fn build_indirect_params_bind_group_layout_entries() -> DynamicBindGroupLayoutEntries {
1447 DynamicBindGroupLayoutEntries::new_with_indices(
1448 ShaderStages::COMPUTE,
1449 (
1450 (0, storage_buffer_read_only::<MeshInputUniform>(false)),
1451 (
1452 1,
1453 storage_buffer_read_only::<IndirectParametersCpuMetadata>(false),
1454 ),
1455 (
1456 2,
1457 storage_buffer_read_only::<IndirectParametersGpuMetadata>(false),
1458 ),
1459 (3, storage_buffer::<IndirectBatchSet>(false)),
1460 ),
1461 )
1462}
1463
1464fn gpu_culling_bind_group_layout_entries() -> DynamicBindGroupLayoutEntries {
1467 preprocess_direct_bind_group_layout_entries().extend_with_indices((
1470 (
1472 7,
1473 storage_buffer_read_only::<IndirectParametersCpuMetadata>(
1474 false,
1475 ),
1476 ),
1477 (
1479 8,
1480 storage_buffer::<IndirectParametersGpuMetadata>(false),
1481 ),
1482 (
1484 9,
1485 storage_buffer_read_only::<MeshCullingData>(false),
1486 ),
1487 ))
1488}
1489
1490fn gpu_occlusion_culling_bind_group_layout_entries() -> DynamicBindGroupLayoutEntries {
1491 gpu_culling_bind_group_layout_entries().extend_with_indices((
1492 (
1493 2,
1494 uniform_buffer::<PreviousViewData>(false),
1495 ),
1496 (
1497 10,
1498 texture_2d(TextureSampleType::Float { filterable: true }),
1499 ),
1500 (
1501 12,
1502 storage_buffer::<LatePreprocessWorkItemIndirectParameters>(
1503 false,
1504 ),
1505 ),
1506 ))
1507}
1508
/// A system that queues specialization of the `mesh_preprocess.wgsl`,
/// `reset_indirect_batch_sets.wgsl`, and `build_indirect_params.wgsl` compute
/// pipelines, choosing specialization keys based on platform support.
pub fn prepare_preprocess_pipelines(
    pipeline_cache: Res<PipelineCache>,
    render_device: Res<RenderDevice>,
    mut specialized_preprocess_pipelines: ResMut<SpecializedComputePipelines<PreprocessPipeline>>,
    mut specialized_reset_indirect_batch_sets_pipelines: ResMut<
        SpecializedComputePipelines<ResetIndirectBatchSetsPipeline>,
    >,
    mut specialized_build_indirect_parameters_pipelines: ResMut<
        SpecializedComputePipelines<BuildIndirectParametersPipeline>,
    >,
    preprocess_pipelines: ResMut<PreprocessPipelines>,
    gpu_preprocessing_support: Res<GpuPreprocessingSupport>,
) {
    let preprocess_pipelines = preprocess_pipelines.into_inner();

    // The direct and frustum-culling preprocessing variants are always
    // prepared.
    preprocess_pipelines.direct_preprocess.prepare(
        &pipeline_cache,
        &mut specialized_preprocess_pipelines,
        PreprocessPipelineKey::empty(),
    );
    preprocess_pipelines.gpu_frustum_culling_preprocess.prepare(
        &pipeline_cache,
        &mut specialized_preprocess_pipelines,
        PreprocessPipelineKey::FRUSTUM_CULLING,
    );

    // The occlusion-culling variants are only prepared when GPU culling is
    // supported.
    if gpu_preprocessing_support.is_culling_supported() {
        preprocess_pipelines
            .early_gpu_occlusion_culling_preprocess
            .prepare(
                &pipeline_cache,
                &mut specialized_preprocess_pipelines,
                PreprocessPipelineKey::FRUSTUM_CULLING
                    | PreprocessPipelineKey::OCCLUSION_CULLING
                    | PreprocessPipelineKey::EARLY_PHASE,
            );
        preprocess_pipelines
            .late_gpu_occlusion_culling_preprocess
            .prepare(
                &pipeline_cache,
                &mut specialized_preprocess_pipelines,
                PreprocessPipelineKey::FRUSTUM_CULLING | PreprocessPipelineKey::OCCLUSION_CULLING,
            );
    }

    let mut build_indirect_parameters_pipeline_key = BuildIndirectParametersPipelineKey::empty();

    // Record `multi_draw_indirect_count` support in the key so the shader can
    // be specialized accordingly.
    if render_device
        .wgpu_device()
        .features()
        .contains(WgpuFeatures::MULTI_DRAW_INDIRECT_COUNT)
    {
        build_indirect_parameters_pipeline_key
            .insert(BuildIndirectParametersPipelineKey::MULTI_DRAW_INDIRECT_COUNT_SUPPORTED);
    }

    // Indirect parameter building for frustum culling, in indexed and
    // non-indexed flavors.
    preprocess_pipelines
        .gpu_frustum_culling_build_indexed_indirect_params
        .prepare(
            &pipeline_cache,
            &mut specialized_build_indirect_parameters_pipelines,
            build_indirect_parameters_pipeline_key | BuildIndirectParametersPipelineKey::INDEXED,
        );
    preprocess_pipelines
        .gpu_frustum_culling_build_non_indexed_indirect_params
        .prepare(
            &pipeline_cache,
            &mut specialized_build_indirect_parameters_pipelines,
            build_indirect_parameters_pipeline_key,
        );

    // The per-phase pipelines below only exist for GPU occlusion culling.
    if !gpu_preprocessing_support.is_culling_supported() {
        return;
    }

    // Prepare the same trio of pipelines for each of the early, late, and main
    // phases, differing only in the phase bit of the key.
    for (preprocess_phase_pipelines, build_indirect_parameters_phase_pipeline_key) in [
        (
            &mut preprocess_pipelines.early_phase,
            BuildIndirectParametersPipelineKey::EARLY_PHASE,
        ),
        (
            &mut preprocess_pipelines.late_phase,
            BuildIndirectParametersPipelineKey::LATE_PHASE,
        ),
        (
            &mut preprocess_pipelines.main_phase,
            BuildIndirectParametersPipelineKey::MAIN_PHASE,
        ),
    ] {
        preprocess_phase_pipelines
            .reset_indirect_batch_sets
            .prepare(
                &pipeline_cache,
                &mut specialized_reset_indirect_batch_sets_pipelines,
            );
        preprocess_phase_pipelines
            .gpu_occlusion_culling_build_indexed_indirect_params
            .prepare(
                &pipeline_cache,
                &mut specialized_build_indirect_parameters_pipelines,
                build_indirect_parameters_pipeline_key
                    | build_indirect_parameters_phase_pipeline_key
                    | BuildIndirectParametersPipelineKey::INDEXED
                    | BuildIndirectParametersPipelineKey::OCCLUSION_CULLING,
            );
        preprocess_phase_pipelines
            .gpu_occlusion_culling_build_non_indexed_indirect_params
            .prepare(
                &pipeline_cache,
                &mut specialized_build_indirect_parameters_pipelines,
                build_indirect_parameters_pipeline_key
                    | build_indirect_parameters_phase_pipeline_key
                    | BuildIndirectParametersPipelineKey::OCCLUSION_CULLING,
            );
    }
}
1628
1629impl PreprocessPipeline {
1630 fn prepare(
1631 &mut self,
1632 pipeline_cache: &PipelineCache,
1633 pipelines: &mut SpecializedComputePipelines<PreprocessPipeline>,
1634 key: PreprocessPipelineKey,
1635 ) {
1636 if self.pipeline_id.is_some() {
1637 return;
1638 }
1639
1640 let preprocess_pipeline_id = pipelines.specialize(pipeline_cache, self, key);
1641 self.pipeline_id = Some(preprocess_pipeline_id);
1642 }
1643}
1644
1645impl SpecializedComputePipeline for ResetIndirectBatchSetsPipeline {
1646 type Key = ();
1647
1648 fn specialize(&self, _: Self::Key) -> ComputePipelineDescriptor {
1649 ComputePipelineDescriptor {
1650 label: Some("reset indirect batch sets".into()),
1651 layout: vec![self.bind_group_layout.clone()],
1652 shader: self.shader.clone(),
1653 ..default()
1654 }
1655 }
1656}
1657
1658impl SpecializedComputePipeline for BuildIndirectParametersPipeline {
1659 type Key = BuildIndirectParametersPipelineKey;
1660
1661 fn specialize(&self, key: Self::Key) -> ComputePipelineDescriptor {
1662 let mut shader_defs = vec![];
1663 if key.contains(BuildIndirectParametersPipelineKey::INDEXED) {
1664 shader_defs.push("INDEXED".into());
1665 }
1666 if key.contains(BuildIndirectParametersPipelineKey::MULTI_DRAW_INDIRECT_COUNT_SUPPORTED) {
1667 shader_defs.push("MULTI_DRAW_INDIRECT_COUNT_SUPPORTED".into());
1668 }
1669 if key.contains(BuildIndirectParametersPipelineKey::OCCLUSION_CULLING) {
1670 shader_defs.push("OCCLUSION_CULLING".into());
1671 }
1672 if key.contains(BuildIndirectParametersPipelineKey::EARLY_PHASE) {
1673 shader_defs.push("EARLY_PHASE".into());
1674 }
1675 if key.contains(BuildIndirectParametersPipelineKey::LATE_PHASE) {
1676 shader_defs.push("LATE_PHASE".into());
1677 }
1678 if key.contains(BuildIndirectParametersPipelineKey::MAIN_PHASE) {
1679 shader_defs.push("MAIN_PHASE".into());
1680 }
1681
1682 let label = format!(
1683 "{} build {}indexed indirect parameters",
1684 if !key.contains(BuildIndirectParametersPipelineKey::OCCLUSION_CULLING) {
1685 "frustum culling"
1686 } else if key.contains(BuildIndirectParametersPipelineKey::EARLY_PHASE) {
1687 "early occlusion culling"
1688 } else if key.contains(BuildIndirectParametersPipelineKey::LATE_PHASE) {
1689 "late occlusion culling"
1690 } else {
1691 "main occlusion culling"
1692 },
1693 if key.contains(BuildIndirectParametersPipelineKey::INDEXED) {
1694 ""
1695 } else {
1696 "non-"
1697 }
1698 );
1699
1700 ComputePipelineDescriptor {
1701 label: Some(label.into()),
1702 layout: vec![self.bind_group_layout.clone()],
1703 shader: self.shader.clone(),
1704 shader_defs,
1705 ..default()
1706 }
1707 }
1708}
1709
1710impl ResetIndirectBatchSetsPipeline {
1711 fn prepare(
1712 &mut self,
1713 pipeline_cache: &PipelineCache,
1714 pipelines: &mut SpecializedComputePipelines<ResetIndirectBatchSetsPipeline>,
1715 ) {
1716 if self.pipeline_id.is_some() {
1717 return;
1718 }
1719
1720 let reset_indirect_batch_sets_pipeline_id = pipelines.specialize(pipeline_cache, self, ());
1721 self.pipeline_id = Some(reset_indirect_batch_sets_pipeline_id);
1722 }
1723}
1724
1725impl BuildIndirectParametersPipeline {
1726 fn prepare(
1727 &mut self,
1728 pipeline_cache: &PipelineCache,
1729 pipelines: &mut SpecializedComputePipelines<BuildIndirectParametersPipeline>,
1730 key: BuildIndirectParametersPipelineKey,
1731 ) {
1732 if self.pipeline_id.is_some() {
1733 return;
1734 }
1735
1736 let build_indirect_parameters_pipeline_id = pipelines.specialize(pipeline_cache, self, key);
1737 self.pipeline_id = Some(build_indirect_parameters_pipeline_id);
1738 }
1739}
1740
/// A system that creates the mesh preprocessing bind groups for each phase of
/// each view and, when any phase uses indirect drawing, the
/// `build_indirect_params.wgsl` bind groups as well.
#[expect(
    clippy::too_many_arguments,
    reason = "it's a system that needs a lot of arguments"
)]
pub fn prepare_preprocess_bind_groups(
    mut commands: Commands,
    views: Query<(Entity, &ExtractedView)>,
    view_depth_pyramids: Query<(&ViewDepthPyramid, &PreviousViewUniformOffset)>,
    render_device: Res<RenderDevice>,
    pipeline_cache: Res<PipelineCache>,
    batched_instance_buffers: Res<BatchedInstanceBuffers<MeshUniform, MeshInputUniform>>,
    indirect_parameters_buffers: Res<IndirectParametersBuffers>,
    mesh_culling_data_buffer: Res<MeshCullingDataBuffer>,
    view_uniforms: Res<ViewUniforms>,
    previous_view_uniforms: Res<PreviousViewUniforms>,
    pipelines: Res<PreprocessPipelines>,
) {
    // Destructure the batched instance buffers into their parts.
    let BatchedInstanceBuffers {
        current_input_buffer: current_input_buffer_vec,
        previous_input_buffer: previous_input_buffer_vec,
        phase_instance_buffers,
    } = batched_instance_buffers.into_inner();

    // Nothing can be bound until both mesh input uniform buffers exist on the
    // GPU.
    let (Some(current_input_buffer), Some(previous_input_buffer)) = (
        current_input_buffer_vec.buffer().buffer(),
        previous_input_buffer_vec.buffer().buffer(),
    ) else {
        return;
    };

    // Tracks whether any phase of any view produced an indirect bind group,
    // which determines whether the indirect-parameter building bind groups are
    // needed at all.
    let mut any_indirect = false;

    for (view_entity, view) in &views {
        let mut bind_groups = TypeIdMap::default();

        // Process each phase's instance buffers for this view.
        for (phase_type_id, phase_instance_buffers) in phase_instance_buffers {
            let UntypedPhaseBatchedInstanceBuffers {
                data_buffer: ref data_buffer_vec,
                ref work_item_buffers,
                ref late_indexed_indirect_parameters_buffer,
                ref late_non_indexed_indirect_parameters_buffer,
            } = *phase_instance_buffers;

            // Skip phases whose output buffer hasn't been allocated yet.
            let Some(data_buffer) = data_buffer_vec.buffer() else {
                continue;
            };

            // Skip phases that have no indirect parameters buffers.
            let Some(phase_indirect_parameters_buffers) =
                indirect_parameters_buffers.get(phase_type_id)
            else {
                continue;
            };

            // Skip phases with no work items for this view.
            let Some(work_item_buffers) = work_item_buffers.get(&view.retained_view_entity) else {
                continue;
            };

            // Bundle up all the references the bind group creation methods
            // need.
            let preprocess_bind_group_builder = PreprocessBindGroupBuilder {
                view: view_entity,
                late_indexed_indirect_parameters_buffer,
                late_non_indexed_indirect_parameters_buffer,
                render_device: &render_device,
                pipeline_cache: &pipeline_cache,
                phase_indirect_parameters_buffers,
                mesh_culling_data_buffer: &mesh_culling_data_buffer,
                view_uniforms: &view_uniforms,
                previous_view_uniforms: &previous_view_uniforms,
                pipelines: &pipelines,
                current_input_buffer,
                previous_input_buffer,
                data_buffer,
            };

            // Dispatch on the kind of work item buffers: direct, indirect with
            // GPU occlusion culling, or indirect with frustum culling only.
            let (was_indirect, bind_group) = match *work_item_buffers {
                PreprocessWorkItemBuffers::Direct(ref work_item_buffer) => (
                    false,
                    preprocess_bind_group_builder
                        .create_direct_preprocess_bind_groups(work_item_buffer),
                ),

                PreprocessWorkItemBuffers::Indirect {
                    indexed: ref indexed_work_item_buffer,
                    non_indexed: ref non_indexed_work_item_buffer,
                    gpu_occlusion_culling: Some(ref gpu_occlusion_culling_work_item_buffers),
                } => (
                    true,
                    preprocess_bind_group_builder
                        .create_indirect_occlusion_culling_preprocess_bind_groups(
                            &view_depth_pyramids,
                            indexed_work_item_buffer,
                            non_indexed_work_item_buffer,
                            gpu_occlusion_culling_work_item_buffers,
                        ),
                ),

                PreprocessWorkItemBuffers::Indirect {
                    indexed: ref indexed_work_item_buffer,
                    non_indexed: ref non_indexed_work_item_buffer,
                    gpu_occlusion_culling: None,
                } => (
                    true,
                    preprocess_bind_group_builder
                        .create_indirect_frustum_culling_preprocess_bind_groups(
                            indexed_work_item_buffer,
                            non_indexed_work_item_buffer,
                        ),
                ),
            };

            // Record the bind group if creation succeeded.
            if let Some(bind_group) = bind_group {
                any_indirect = any_indirect || was_indirect;
                bind_groups.insert(*phase_type_id, bind_group);
            }
        }

        // Attach the per-phase bind groups to the view entity.
        commands
            .entity(view_entity)
            .insert(PreprocessBindGroups(bind_groups));
    }

    // Create the indirect parameter building bind groups only if something
    // actually draws indirectly.
    if any_indirect {
        create_build_indirect_parameters_bind_groups(
            &mut commands,
            &render_device,
            &pipeline_cache,
            &pipelines,
            current_input_buffer,
            &indirect_parameters_buffers,
        );
    }
}
1887
/// Bundles the immutable state needed to create the mesh preprocessing bind
/// groups for a single phase of a single view.
struct PreprocessBindGroupBuilder<'a> {
    /// The render-world entity of the view being processed.
    view: Entity,
    /// Indirect dispatch parameters for the late indexed preprocessing pass.
    late_indexed_indirect_parameters_buffer:
        &'a RawBufferVec<LatePreprocessWorkItemIndirectParameters>,
    /// Indirect dispatch parameters for the late non-indexed preprocessing
    /// pass.
    late_non_indexed_indirect_parameters_buffer:
        &'a RawBufferVec<LatePreprocessWorkItemIndirectParameters>,
    /// The render device, used to create the bind groups.
    render_device: &'a RenderDevice,
    /// The pipeline cache, used to resolve bind group layouts.
    pipeline_cache: &'a PipelineCache,
    /// The indirect parameters buffers for the phase being processed.
    phase_indirect_parameters_buffers: &'a UntypedPhaseIndirectParametersBuffers,
    /// Per-mesh culling data.
    mesh_culling_data_buffer: &'a MeshCullingDataBuffer,
    /// The current frame's view uniforms.
    view_uniforms: &'a ViewUniforms,
    /// The previous frame's view uniforms.
    previous_view_uniforms: &'a PreviousViewUniforms,
    /// The preprocessing pipelines, which own the bind group layouts.
    pipelines: &'a PreprocessPipelines,
    /// The mesh input uniform buffer for the current frame.
    current_input_buffer: &'a Buffer,
    /// The mesh input uniform buffer from the previous frame.
    previous_input_buffer: &'a Buffer,
    /// The output buffer that the preprocessing shader writes mesh uniforms
    /// into.
    data_buffer: &'a Buffer,
}
1928
1929impl<'a> PreprocessBindGroupBuilder<'a> {
    /// Creates the bind group for the direct (no culling) variant of the mesh
    /// preprocessing shader.
    ///
    /// Returns `None` if the view uniforms or the work item buffer aren't
    /// available yet.
    fn create_direct_preprocess_bind_groups(
        &self,
        work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
    ) -> Option<PhasePreprocessBindGroups> {
        // The number of bytes of the work item buffer actually in use; `None`
        // if the buffer holds no work items (a zero-size binding is invalid).
        let work_item_buffer_size = NonZero::<u64>::try_from(
            work_item_buffer.len() as u64 * u64::from(PreprocessWorkItem::min_size()),
        )
        .ok();

        Some(PhasePreprocessBindGroups::Direct(
            self.render_device.create_bind_group(
                "preprocess_direct_bind_group",
                &self
                    .pipeline_cache
                    .get_bind_group_layout(&self.pipelines.direct_preprocess.bind_group_layout),
                &BindGroupEntries::with_indices((
                    (0, self.view_uniforms.uniforms.binding()?),
                    (3, self.current_input_buffer.as_entire_binding()),
                    (4, self.previous_input_buffer.as_entire_binding()),
                    (
                        5,
                        // Bind only the occupied portion of the work item
                        // buffer, not its full allocated capacity.
                        BindingResource::Buffer(BufferBinding {
                            buffer: work_item_buffer.buffer()?,
                            offset: 0,
                            size: work_item_buffer_size,
                        }),
                    ),
                    (6, self.data_buffer.as_entire_binding()),
                )),
            ),
        ))
    }
1967
    /// Creates the four bind groups (early/late × indexed/non-indexed) needed
    /// for GPU occlusion culling mesh preprocessing.
    ///
    /// Returns `None` if this view has no depth pyramid or previous view
    /// uniform offset.
    fn create_indirect_occlusion_culling_preprocess_bind_groups(
        &self,
        view_depth_pyramids: &Query<(&ViewDepthPyramid, &PreviousViewUniformOffset)>,
        indexed_work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
        non_indexed_work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
        gpu_occlusion_culling_work_item_buffers: &GpuOcclusionCullingWorkItemBuffers,
    ) -> Option<PhasePreprocessBindGroups> {
        let GpuOcclusionCullingWorkItemBuffers {
            late_indexed: ref late_indexed_work_item_buffer,
            late_non_indexed: ref late_non_indexed_work_item_buffer,
            ..
        } = *gpu_occlusion_culling_work_item_buffers;

        // The depth pyramid and previous-view uniform offset are required for
        // occlusion tests; bail out if this view doesn't have them.
        let (view_depth_pyramid, previous_view_uniform_offset) =
            view_depth_pyramids.get(self.view).ok()?;

        // Each of the four bind groups is created independently and may itself
        // be `None` if its buffers aren't ready yet.
        Some(PhasePreprocessBindGroups::IndirectOcclusionCulling {
            early_indexed: self.create_indirect_occlusion_culling_early_indexed_bind_group(
                view_depth_pyramid,
                previous_view_uniform_offset,
                indexed_work_item_buffer,
                late_indexed_work_item_buffer,
            ),

            early_non_indexed: self.create_indirect_occlusion_culling_early_non_indexed_bind_group(
                view_depth_pyramid,
                previous_view_uniform_offset,
                non_indexed_work_item_buffer,
                late_non_indexed_work_item_buffer,
            ),

            late_indexed: self.create_indirect_occlusion_culling_late_indexed_bind_group(
                view_depth_pyramid,
                previous_view_uniform_offset,
                late_indexed_work_item_buffer,
            ),

            late_non_indexed: self.create_indirect_occlusion_culling_late_non_indexed_bind_group(
                view_depth_pyramid,
                previous_view_uniform_offset,
                late_non_indexed_work_item_buffer,
            ),
        })
    }
2014
    /// Creates the bind group for the early GPU occlusion culling pass over
    /// indexed meshes.
    ///
    /// Returns `None` if any buffer the pass needs hasn't been created yet.
    fn create_indirect_occlusion_culling_early_indexed_bind_group(
        &self,
        view_depth_pyramid: &ViewDepthPyramid,
        previous_view_uniform_offset: &PreviousViewUniformOffset,
        indexed_work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
        late_indexed_work_item_buffer: &UninitBufferVec<PreprocessWorkItem>,
    ) -> Option<BindGroup> {
        let mesh_culling_data_buffer = self.mesh_culling_data_buffer.buffer()?;
        let view_uniforms_binding = self.view_uniforms.uniforms.binding()?;
        let previous_view_buffer = self.previous_view_uniforms.uniforms.buffer()?;

        // All five GPU buffers must exist before the bind group can be built.
        match (
            self.phase_indirect_parameters_buffers
                .indexed
                .cpu_metadata_buffer(),
            self.phase_indirect_parameters_buffers
                .indexed
                .gpu_metadata_buffer(),
            indexed_work_item_buffer.buffer(),
            late_indexed_work_item_buffer.buffer(),
            self.late_indexed_indirect_parameters_buffer.buffer(),
        ) {
            (
                Some(indexed_cpu_metadata_buffer),
                Some(indexed_gpu_metadata_buffer),
                Some(indexed_work_item_gpu_buffer),
                Some(late_indexed_work_item_gpu_buffer),
                Some(late_indexed_indirect_parameters_buffer),
            ) => {
                // The number of bytes of the work item buffer actually in use;
                // `None` if it's empty.
                let indexed_work_item_buffer_size = NonZero::<u64>::try_from(
                    indexed_work_item_buffer.len() as u64
                        * u64::from(PreprocessWorkItem::min_size()),
                )
                .ok();

                Some(
                    self.render_device.create_bind_group(
                        "preprocess_early_indexed_gpu_occlusion_culling_bind_group",
                        &self.pipeline_cache.get_bind_group_layout(
                            &self
                                .pipelines
                                .early_gpu_occlusion_culling_preprocess
                                .bind_group_layout,
                        ),
                        &BindGroupEntries::with_indices((
                            (3, self.current_input_buffer.as_entire_binding()),
                            (4, self.previous_input_buffer.as_entire_binding()),
                            (
                                5,
                                BindingResource::Buffer(BufferBinding {
                                    buffer: indexed_work_item_gpu_buffer,
                                    offset: 0,
                                    size: indexed_work_item_buffer_size,
                                }),
                            ),
                            (6, self.data_buffer.as_entire_binding()),
                            (7, indexed_cpu_metadata_buffer.as_entire_binding()),
                            (8, indexed_gpu_metadata_buffer.as_entire_binding()),
                            (9, mesh_culling_data_buffer.as_entire_binding()),
                            (0, view_uniforms_binding.clone()),
                            (10, &view_depth_pyramid.all_mips),
                            (
                                2,
                                // Bind just this view's section of the previous
                                // view uniform buffer.
                                BufferBinding {
                                    buffer: previous_view_buffer,
                                    offset: previous_view_uniform_offset.offset as u64,
                                    size: NonZeroU64::new(size_of::<PreviousViewData>() as u64),
                                },
                            ),
                            (
                                11,
                                // NOTE(review): this reuses the *early* work
                                // item buffer's occupied size for the late
                                // buffer; presumably the two buffers are kept
                                // at the same length — confirm.
                                BufferBinding {
                                    buffer: late_indexed_work_item_gpu_buffer,
                                    offset: 0,
                                    size: indexed_work_item_buffer_size,
                                },
                            ),
                            (
                                12,
                                BufferBinding {
                                    buffer: late_indexed_indirect_parameters_buffer,
                                    offset: 0,
                                    size: NonZeroU64::new(
                                        late_indexed_indirect_parameters_buffer.size(),
                                    ),
                                },
                            ),
                        )),
                    ),
                )
            }
            _ => None,
        }
    }
2114
    /// Creates the bind group for the early GPU occlusion culling pass over
    /// non-indexed meshes.
    ///
    /// Returns `None` if any buffer the pass needs hasn't been created yet.
    fn create_indirect_occlusion_culling_early_non_indexed_bind_group(
        &self,
        view_depth_pyramid: &ViewDepthPyramid,
        previous_view_uniform_offset: &PreviousViewUniformOffset,
        non_indexed_work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
        late_non_indexed_work_item_buffer: &UninitBufferVec<PreprocessWorkItem>,
    ) -> Option<BindGroup> {
        let mesh_culling_data_buffer = self.mesh_culling_data_buffer.buffer()?;
        let view_uniforms_binding = self.view_uniforms.uniforms.binding()?;
        let previous_view_buffer = self.previous_view_uniforms.uniforms.buffer()?;

        // All five GPU buffers must exist before the bind group can be built.
        match (
            self.phase_indirect_parameters_buffers
                .non_indexed
                .cpu_metadata_buffer(),
            self.phase_indirect_parameters_buffers
                .non_indexed
                .gpu_metadata_buffer(),
            non_indexed_work_item_buffer.buffer(),
            late_non_indexed_work_item_buffer.buffer(),
            self.late_non_indexed_indirect_parameters_buffer.buffer(),
        ) {
            (
                Some(non_indexed_cpu_metadata_buffer),
                Some(non_indexed_gpu_metadata_buffer),
                Some(non_indexed_work_item_gpu_buffer),
                // Note: this binding shadows the parameter of the same name
                // with the underlying GPU buffer.
                Some(late_non_indexed_work_item_buffer),
                Some(late_non_indexed_indirect_parameters_buffer),
            ) => {
                // The number of bytes of the work item buffer actually in use;
                // `None` if it's empty.
                let non_indexed_work_item_buffer_size = NonZero::<u64>::try_from(
                    non_indexed_work_item_buffer.len() as u64
                        * u64::from(PreprocessWorkItem::min_size()),
                )
                .ok();

                Some(
                    self.render_device.create_bind_group(
                        "preprocess_early_non_indexed_gpu_occlusion_culling_bind_group",
                        &self.pipeline_cache.get_bind_group_layout(
                            &self
                                .pipelines
                                .early_gpu_occlusion_culling_preprocess
                                .bind_group_layout,
                        ),
                        &BindGroupEntries::with_indices((
                            (3, self.current_input_buffer.as_entire_binding()),
                            (4, self.previous_input_buffer.as_entire_binding()),
                            (
                                5,
                                BindingResource::Buffer(BufferBinding {
                                    buffer: non_indexed_work_item_gpu_buffer,
                                    offset: 0,
                                    size: non_indexed_work_item_buffer_size,
                                }),
                            ),
                            (6, self.data_buffer.as_entire_binding()),
                            (7, non_indexed_cpu_metadata_buffer.as_entire_binding()),
                            (8, non_indexed_gpu_metadata_buffer.as_entire_binding()),
                            (9, mesh_culling_data_buffer.as_entire_binding()),
                            (0, view_uniforms_binding.clone()),
                            (10, &view_depth_pyramid.all_mips),
                            (
                                2,
                                // Bind just this view's section of the previous
                                // view uniform buffer.
                                BufferBinding {
                                    buffer: previous_view_buffer,
                                    offset: previous_view_uniform_offset.offset as u64,
                                    size: NonZeroU64::new(size_of::<PreviousViewData>() as u64),
                                },
                            ),
                            (
                                11,
                                // NOTE(review): this reuses the *early* work
                                // item buffer's occupied size for the late
                                // buffer; presumably the two buffers are kept
                                // at the same length — confirm.
                                BufferBinding {
                                    buffer: late_non_indexed_work_item_buffer,
                                    offset: 0,
                                    size: non_indexed_work_item_buffer_size,
                                },
                            ),
                            (
                                12,
                                BufferBinding {
                                    buffer: late_non_indexed_indirect_parameters_buffer,
                                    offset: 0,
                                    size: NonZeroU64::new(
                                        late_non_indexed_indirect_parameters_buffer.size(),
                                    ),
                                },
                            ),
                        )),
                    ),
                )
            }
            _ => None,
        }
    }
2214
    /// Creates the bind group for the late GPU occlusion culling pass over
    /// indexed meshes.
    ///
    /// Returns `None` if any buffer the pass needs hasn't been created yet.
    fn create_indirect_occlusion_culling_late_indexed_bind_group(
        &self,
        view_depth_pyramid: &ViewDepthPyramid,
        previous_view_uniform_offset: &PreviousViewUniformOffset,
        late_indexed_work_item_buffer: &UninitBufferVec<PreprocessWorkItem>,
    ) -> Option<BindGroup> {
        let mesh_culling_data_buffer = self.mesh_culling_data_buffer.buffer()?;
        let view_uniforms_binding = self.view_uniforms.uniforms.binding()?;
        let previous_view_buffer = self.previous_view_uniforms.uniforms.buffer()?;

        // All four GPU buffers must exist before the bind group can be built.
        match (
            self.phase_indirect_parameters_buffers
                .indexed
                .cpu_metadata_buffer(),
            self.phase_indirect_parameters_buffers
                .indexed
                .gpu_metadata_buffer(),
            late_indexed_work_item_buffer.buffer(),
            self.late_indexed_indirect_parameters_buffer.buffer(),
        ) {
            (
                Some(indexed_cpu_metadata_buffer),
                Some(indexed_gpu_metadata_buffer),
                Some(late_indexed_work_item_gpu_buffer),
                Some(late_indexed_indirect_parameters_buffer),
            ) => {
                // The number of bytes of the late work item buffer actually in
                // use; `None` if it's empty.
                let late_indexed_work_item_buffer_size = NonZero::<u64>::try_from(
                    late_indexed_work_item_buffer.len() as u64
                        * u64::from(PreprocessWorkItem::min_size()),
                )
                .ok();

                Some(
                    self.render_device.create_bind_group(
                        "preprocess_late_indexed_gpu_occlusion_culling_bind_group",
                        &self.pipeline_cache.get_bind_group_layout(
                            &self
                                .pipelines
                                .late_gpu_occlusion_culling_preprocess
                                .bind_group_layout,
                        ),
                        &BindGroupEntries::with_indices((
                            (3, self.current_input_buffer.as_entire_binding()),
                            (4, self.previous_input_buffer.as_entire_binding()),
                            (
                                5,
                                // The late pass consumes the work items that
                                // the early pass wrote to this buffer.
                                BindingResource::Buffer(BufferBinding {
                                    buffer: late_indexed_work_item_gpu_buffer,
                                    offset: 0,
                                    size: late_indexed_work_item_buffer_size,
                                }),
                            ),
                            (6, self.data_buffer.as_entire_binding()),
                            (7, indexed_cpu_metadata_buffer.as_entire_binding()),
                            (8, indexed_gpu_metadata_buffer.as_entire_binding()),
                            (9, mesh_culling_data_buffer.as_entire_binding()),
                            (0, view_uniforms_binding.clone()),
                            (10, &view_depth_pyramid.all_mips),
                            (
                                2,
                                // Bind just this view's section of the previous
                                // view uniform buffer.
                                BufferBinding {
                                    buffer: previous_view_buffer,
                                    offset: previous_view_uniform_offset.offset as u64,
                                    size: NonZeroU64::new(size_of::<PreviousViewData>() as u64),
                                },
                            ),
                            (
                                12,
                                BufferBinding {
                                    buffer: late_indexed_indirect_parameters_buffer,
                                    offset: 0,
                                    size: NonZeroU64::new(
                                        late_indexed_indirect_parameters_buffer.size(),
                                    ),
                                },
                            ),
                        )),
                    ),
                )
            }
            _ => None,
        }
    }
2303
    /// Creates the bind group for the late GPU occlusion culling pass over
    /// non-indexed meshes.
    ///
    /// Returns `None` if any buffer the pass needs hasn't been created yet.
    fn create_indirect_occlusion_culling_late_non_indexed_bind_group(
        &self,
        view_depth_pyramid: &ViewDepthPyramid,
        previous_view_uniform_offset: &PreviousViewUniformOffset,
        late_non_indexed_work_item_buffer: &UninitBufferVec<PreprocessWorkItem>,
    ) -> Option<BindGroup> {
        let mesh_culling_data_buffer = self.mesh_culling_data_buffer.buffer()?;
        let view_uniforms_binding = self.view_uniforms.uniforms.binding()?;
        let previous_view_buffer = self.previous_view_uniforms.uniforms.buffer()?;

        // All four GPU buffers must exist before the bind group can be built.
        match (
            self.phase_indirect_parameters_buffers
                .non_indexed
                .cpu_metadata_buffer(),
            self.phase_indirect_parameters_buffers
                .non_indexed
                .gpu_metadata_buffer(),
            late_non_indexed_work_item_buffer.buffer(),
            self.late_non_indexed_indirect_parameters_buffer.buffer(),
        ) {
            (
                Some(non_indexed_cpu_metadata_buffer),
                Some(non_indexed_gpu_metadata_buffer),
                Some(non_indexed_work_item_gpu_buffer),
                Some(late_non_indexed_indirect_parameters_buffer),
            ) => {
                // The number of bytes of the late work item buffer actually in
                // use; `None` if it's empty.
                let non_indexed_work_item_buffer_size = NonZero::<u64>::try_from(
                    late_non_indexed_work_item_buffer.len() as u64
                        * u64::from(PreprocessWorkItem::min_size()),
                )
                .ok();

                Some(
                    self.render_device.create_bind_group(
                        "preprocess_late_non_indexed_gpu_occlusion_culling_bind_group",
                        &self.pipeline_cache.get_bind_group_layout(
                            &self
                                .pipelines
                                .late_gpu_occlusion_culling_preprocess
                                .bind_group_layout,
                        ),
                        &BindGroupEntries::with_indices((
                            (3, self.current_input_buffer.as_entire_binding()),
                            (4, self.previous_input_buffer.as_entire_binding()),
                            (
                                5,
                                // The late pass consumes the work items that
                                // the early pass wrote to this buffer.
                                BindingResource::Buffer(BufferBinding {
                                    buffer: non_indexed_work_item_gpu_buffer,
                                    offset: 0,
                                    size: non_indexed_work_item_buffer_size,
                                }),
                            ),
                            (6, self.data_buffer.as_entire_binding()),
                            (7, non_indexed_cpu_metadata_buffer.as_entire_binding()),
                            (8, non_indexed_gpu_metadata_buffer.as_entire_binding()),
                            (9, mesh_culling_data_buffer.as_entire_binding()),
                            (0, view_uniforms_binding.clone()),
                            (10, &view_depth_pyramid.all_mips),
                            (
                                2,
                                // Bind just this view's section of the previous
                                // view uniform buffer.
                                BufferBinding {
                                    buffer: previous_view_buffer,
                                    offset: previous_view_uniform_offset.offset as u64,
                                    size: NonZeroU64::new(size_of::<PreviousViewData>() as u64),
                                },
                            ),
                            (
                                12,
                                BufferBinding {
                                    buffer: late_non_indexed_indirect_parameters_buffer,
                                    offset: 0,
                                    size: NonZeroU64::new(
                                        late_non_indexed_indirect_parameters_buffer.size(),
                                    ),
                                },
                            ),
                        )),
                    ),
                )
            }
            _ => None,
        }
    }
2392
2393 fn create_indirect_frustum_culling_preprocess_bind_groups(
2396 &self,
2397 indexed_work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
2398 non_indexed_work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
2399 ) -> Option<PhasePreprocessBindGroups> {
2400 Some(PhasePreprocessBindGroups::IndirectFrustumCulling {
2401 indexed: self
2402 .create_indirect_frustum_culling_indexed_bind_group(indexed_work_item_buffer),
2403 non_indexed: self.create_indirect_frustum_culling_non_indexed_bind_group(
2404 non_indexed_work_item_buffer,
2405 ),
2406 })
2407 }
2408
2409 fn create_indirect_frustum_culling_indexed_bind_group(
2412 &self,
2413 indexed_work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
2414 ) -> Option<BindGroup> {
2415 let mesh_culling_data_buffer = self.mesh_culling_data_buffer.buffer()?;
2416 let view_uniforms_binding = self.view_uniforms.uniforms.binding()?;
2417
2418 match (
2419 self.phase_indirect_parameters_buffers
2420 .indexed
2421 .cpu_metadata_buffer(),
2422 self.phase_indirect_parameters_buffers
2423 .indexed
2424 .gpu_metadata_buffer(),
2425 indexed_work_item_buffer.buffer(),
2426 ) {
2427 (
2428 Some(indexed_cpu_metadata_buffer),
2429 Some(indexed_gpu_metadata_buffer),
2430 Some(indexed_work_item_gpu_buffer),
2431 ) => {
2432 let indexed_work_item_buffer_size = NonZero::<u64>::try_from(
2436 indexed_work_item_buffer.len() as u64
2437 * u64::from(PreprocessWorkItem::min_size()),
2438 )
2439 .ok();
2440
2441 Some(
2442 self.render_device.create_bind_group(
2443 "preprocess_gpu_indexed_frustum_culling_bind_group",
2444 &self.pipeline_cache.get_bind_group_layout(
2445 &self
2446 .pipelines
2447 .gpu_frustum_culling_preprocess
2448 .bind_group_layout,
2449 ),
2450 &BindGroupEntries::with_indices((
2451 (3, self.current_input_buffer.as_entire_binding()),
2452 (4, self.previous_input_buffer.as_entire_binding()),
2453 (
2454 5,
2455 BindingResource::Buffer(BufferBinding {
2456 buffer: indexed_work_item_gpu_buffer,
2457 offset: 0,
2458 size: indexed_work_item_buffer_size,
2459 }),
2460 ),
2461 (6, self.data_buffer.as_entire_binding()),
2462 (7, indexed_cpu_metadata_buffer.as_entire_binding()),
2463 (8, indexed_gpu_metadata_buffer.as_entire_binding()),
2464 (9, mesh_culling_data_buffer.as_entire_binding()),
2465 (0, view_uniforms_binding.clone()),
2466 )),
2467 ),
2468 )
2469 }
2470 _ => None,
2471 }
2472 }
2473
2474 fn create_indirect_frustum_culling_non_indexed_bind_group(
2477 &self,
2478 non_indexed_work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
2479 ) -> Option<BindGroup> {
2480 let mesh_culling_data_buffer = self.mesh_culling_data_buffer.buffer()?;
2481 let view_uniforms_binding = self.view_uniforms.uniforms.binding()?;
2482
2483 match (
2484 self.phase_indirect_parameters_buffers
2485 .non_indexed
2486 .cpu_metadata_buffer(),
2487 self.phase_indirect_parameters_buffers
2488 .non_indexed
2489 .gpu_metadata_buffer(),
2490 non_indexed_work_item_buffer.buffer(),
2491 ) {
2492 (
2493 Some(non_indexed_cpu_metadata_buffer),
2494 Some(non_indexed_gpu_metadata_buffer),
2495 Some(non_indexed_work_item_gpu_buffer),
2496 ) => {
2497 let non_indexed_work_item_buffer_size = NonZero::<u64>::try_from(
2501 non_indexed_work_item_buffer.len() as u64
2502 * u64::from(PreprocessWorkItem::min_size()),
2503 )
2504 .ok();
2505
2506 Some(
2507 self.render_device.create_bind_group(
2508 "preprocess_gpu_non_indexed_frustum_culling_bind_group",
2509 &self.pipeline_cache.get_bind_group_layout(
2510 &self
2511 .pipelines
2512 .gpu_frustum_culling_preprocess
2513 .bind_group_layout,
2514 ),
2515 &BindGroupEntries::with_indices((
2516 (3, self.current_input_buffer.as_entire_binding()),
2517 (4, self.previous_input_buffer.as_entire_binding()),
2518 (
2519 5,
2520 BindingResource::Buffer(BufferBinding {
2521 buffer: non_indexed_work_item_gpu_buffer,
2522 offset: 0,
2523 size: non_indexed_work_item_buffer_size,
2524 }),
2525 ),
2526 (6, self.data_buffer.as_entire_binding()),
2527 (7, non_indexed_cpu_metadata_buffer.as_entire_binding()),
2528 (8, non_indexed_gpu_metadata_buffer.as_entire_binding()),
2529 (9, mesh_culling_data_buffer.as_entire_binding()),
2530 (0, view_uniforms_binding.clone()),
2531 )),
2532 ),
2533 )
2534 }
2535 _ => None,
2536 }
2537 }
2538}
2539
/// Creates the bind groups for the `reset_indirect_batch_sets` and
/// build-indirect-parameters compute pipelines for every phase in
/// `indirect_parameters_buffers`, and stores them in the render world as a
/// [`BuildIndirectParametersBindGroups`] resource.
///
/// Each phase gets up to four bind groups (reset + build, for indexed and
/// non-indexed meshes). Any bind group whose backing GPU buffers haven't been
/// allocated yet is left as `None`.
fn create_build_indirect_parameters_bind_groups(
    commands: &mut Commands,
    render_device: &RenderDevice,
    pipeline_cache: &PipelineCache,
    pipelines: &PreprocessPipelines,
    current_input_buffer: &Buffer,
    indirect_parameters_buffers: &IndirectParametersBuffers,
) {
    let mut build_indirect_parameters_bind_groups = BuildIndirectParametersBindGroups::new();

    for (phase_type_id, phase_indirect_parameters_buffer) in indirect_parameters_buffers.iter() {
        build_indirect_parameters_bind_groups.insert(
            *phase_type_id,
            PhaseBuildIndirectParametersBindGroups {
                // Bind group for the `reset_indirect_batch_sets` pipeline,
                // indexed meshes. (The one-element tuple match mirrors the
                // multi-buffer matches below.)
                reset_indexed_indirect_batch_sets: match (phase_indirect_parameters_buffer
                    .indexed
                    .batch_sets_buffer(),)
                {
                    (Some(indexed_batch_sets_buffer),) => Some(
                        render_device.create_bind_group(
                            "reset_indexed_indirect_batch_sets_bind_group",
                            &pipeline_cache.get_bind_group_layout(
                                &pipelines
                                    .early_phase
                                    .reset_indirect_batch_sets
                                    .bind_group_layout,
                            ),
                            &BindGroupEntries::sequential((
                                indexed_batch_sets_buffer.as_entire_binding(),
                            )),
                        ),
                    ),
                    _ => None,
                },

                // Same pipeline as above, but bound to the non-indexed batch
                // sets buffer.
                reset_non_indexed_indirect_batch_sets: match (phase_indirect_parameters_buffer
                    .non_indexed
                    .batch_sets_buffer(),)
                {
                    (Some(non_indexed_batch_sets_buffer),) => Some(
                        render_device.create_bind_group(
                            "reset_non_indexed_indirect_batch_sets_bind_group",
                            &pipeline_cache.get_bind_group_layout(
                                &pipelines
                                    .early_phase
                                    .reset_indirect_batch_sets
                                    .bind_group_layout,
                            ),
                            &BindGroupEntries::sequential((
                                non_indexed_batch_sets_buffer.as_entire_binding(),
                            )),
                        ),
                    ),
                    _ => None,
                },

                // Bind group for building indirect draw parameters for indexed
                // meshes. All four buffers must be present.
                build_indexed_indirect: match (
                    phase_indirect_parameters_buffer
                        .indexed
                        .cpu_metadata_buffer(),
                    phase_indirect_parameters_buffer
                        .indexed
                        .gpu_metadata_buffer(),
                    phase_indirect_parameters_buffer.indexed.data_buffer(),
                    phase_indirect_parameters_buffer.indexed.batch_sets_buffer(),
                ) {
                    (
                        Some(indexed_indirect_parameters_cpu_metadata_buffer),
                        Some(indexed_indirect_parameters_gpu_metadata_buffer),
                        Some(indexed_indirect_parameters_data_buffer),
                        Some(indexed_batch_sets_buffer),
                    ) => Some(
                        render_device.create_bind_group(
                            "build_indexed_indirect_parameters_bind_group",
                            &pipeline_cache.get_bind_group_layout(
                                &pipelines
                                    .gpu_frustum_culling_build_indexed_indirect_params
                                    .bind_group_layout,
                            ),
                            &BindGroupEntries::sequential((
                                current_input_buffer.as_entire_binding(),
                                // Bind only the first `batch_count` metadata
                                // entries, not the buffer's full capacity.
                                BufferBinding {
                                    buffer: indexed_indirect_parameters_cpu_metadata_buffer,
                                    offset: 0,
                                    size: NonZeroU64::new(
                                        phase_indirect_parameters_buffer.indexed.batch_count()
                                            as u64
                                            * size_of::<IndirectParametersCpuMetadata>() as u64,
                                    ),
                                },
                                BufferBinding {
                                    buffer: indexed_indirect_parameters_gpu_metadata_buffer,
                                    offset: 0,
                                    size: NonZeroU64::new(
                                        phase_indirect_parameters_buffer.indexed.batch_count()
                                            as u64
                                            * size_of::<IndirectParametersGpuMetadata>() as u64,
                                    ),
                                },
                                indexed_batch_sets_buffer.as_entire_binding(),
                                indexed_indirect_parameters_data_buffer.as_entire_binding(),
                            )),
                        ),
                    ),
                    _ => None,
                },

                // As above, but for non-indexed meshes.
                build_non_indexed_indirect: match (
                    phase_indirect_parameters_buffer
                        .non_indexed
                        .cpu_metadata_buffer(),
                    phase_indirect_parameters_buffer
                        .non_indexed
                        .gpu_metadata_buffer(),
                    phase_indirect_parameters_buffer.non_indexed.data_buffer(),
                    phase_indirect_parameters_buffer
                        .non_indexed
                        .batch_sets_buffer(),
                ) {
                    (
                        Some(non_indexed_indirect_parameters_cpu_metadata_buffer),
                        Some(non_indexed_indirect_parameters_gpu_metadata_buffer),
                        Some(non_indexed_indirect_parameters_data_buffer),
                        Some(non_indexed_batch_sets_buffer),
                    ) => Some(
                        render_device.create_bind_group(
                            "build_non_indexed_indirect_parameters_bind_group",
                            &pipeline_cache.get_bind_group_layout(
                                &pipelines
                                    .gpu_frustum_culling_build_non_indexed_indirect_params
                                    .bind_group_layout,
                            ),
                            &BindGroupEntries::sequential((
                                current_input_buffer.as_entire_binding(),
                                // Bind only the first `batch_count` metadata
                                // entries, not the buffer's full capacity.
                                BufferBinding {
                                    buffer: non_indexed_indirect_parameters_cpu_metadata_buffer,
                                    offset: 0,
                                    size: NonZeroU64::new(
                                        phase_indirect_parameters_buffer.non_indexed.batch_count()
                                            as u64
                                            * size_of::<IndirectParametersCpuMetadata>() as u64,
                                    ),
                                },
                                BufferBinding {
                                    buffer: non_indexed_indirect_parameters_gpu_metadata_buffer,
                                    offset: 0,
                                    size: NonZeroU64::new(
                                        phase_indirect_parameters_buffer.non_indexed.batch_count()
                                            as u64
                                            * size_of::<IndirectParametersGpuMetadata>() as u64,
                                    ),
                                },
                                non_indexed_batch_sets_buffer.as_entire_binding(),
                                non_indexed_indirect_parameters_data_buffer.as_entire_binding(),
                            )),
                        ),
                    ),
                    _ => None,
                },
            },
        );
    }

    // Publish (or replace) the per-phase bind groups as a render-world
    // resource.
    commands.insert_resource(build_indirect_parameters_bind_groups);
}
2720
2721pub fn write_mesh_culling_data_buffer(
2723 render_device: Res<RenderDevice>,
2724 render_queue: Res<RenderQueue>,
2725 mut mesh_culling_data_buffer: ResMut<MeshCullingDataBuffer>,
2726) {
2727 mesh_culling_data_buffer.write_buffer(&render_device, &render_queue);
2728}