use core::num::{NonZero, NonZeroU64};

use bevy_app::{App, Plugin};
use bevy_asset::{embedded_asset, load_embedded_asset, Handle};
use bevy_core_pipeline::{
    core_3d::graph::{Core3d, Node3d},
    experimental::mip_generation::ViewDepthPyramid,
    prepass::{DepthPrepass, PreviousViewData, PreviousViewUniformOffset, PreviousViewUniforms},
};
use bevy_derive::{Deref, DerefMut};
use bevy_ecs::{
    component::Component,
    entity::Entity,
    prelude::resource_exists,
    query::{Has, Or, QueryState, With, Without},
    resource::Resource,
    schedule::IntoScheduleConfigs as _,
    system::{lifetimeless::Read, Commands, Query, Res, ResMut},
    world::{FromWorld, World},
};
use bevy_log::warn_once;
use bevy_render::{
    batching::gpu_preprocessing::{
        BatchedInstanceBuffers, GpuOcclusionCullingWorkItemBuffers, GpuPreprocessingMode,
        GpuPreprocessingSupport, IndirectBatchSet, IndirectParametersBuffers,
        IndirectParametersCpuMetadata, IndirectParametersGpuMetadata, IndirectParametersIndexed,
        IndirectParametersNonIndexed, LatePreprocessWorkItemIndirectParameters, PreprocessWorkItem,
        PreprocessWorkItemBuffers, UntypedPhaseBatchedInstanceBuffers,
        UntypedPhaseIndirectParametersBuffers,
    },
    diagnostic::RecordDiagnostics,
    experimental::occlusion_culling::OcclusionCulling,
    render_graph::{Node, NodeRunError, RenderGraphContext, RenderGraphExt},
    render_resource::{
        binding_types::{storage_buffer, storage_buffer_read_only, texture_2d, uniform_buffer},
        BindGroup, BindGroupEntries, BindGroupLayoutDescriptor, BindingResource, Buffer,
        BufferBinding, CachedComputePipelineId, ComputePassDescriptor, ComputePipelineDescriptor,
        DynamicBindGroupLayoutEntries, PipelineCache, PushConstantRange, RawBufferVec,
        ShaderStages, ShaderType, SpecializedComputePipeline, SpecializedComputePipelines,
        TextureSampleType, UninitBufferVec,
    },
    renderer::{RenderContext, RenderDevice, RenderQueue},
    settings::WgpuFeatures,
    view::{ExtractedView, NoIndirectDrawing, ViewUniform, ViewUniformOffset, ViewUniforms},
    Render, RenderApp, RenderSystems,
};
use bevy_shader::Shader;
use bevy_utils::{default, TypeIdMap};
use bitflags::bitflags;
use smallvec::{smallvec, SmallVec};
use tracing::warn;

use crate::{
    graph::NodePbr, MeshCullingData, MeshCullingDataBuffer, MeshInputUniform, MeshUniform,
};

use super::{ShadowView, ViewLightEntities};

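/// The number of invocations in each workgroup of the preprocessing compute
/// shaders; dispatch sizes below divide the work item count by this value,
/// rounding up.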
const WORKGROUP_SIZE: usize = 64;

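/// A plugin that builds mesh uniforms in compute shaders on the GPU.
///
/// The systems and render graph nodes in this module are only registered when
/// [`Self::use_gpu_instance_buffer_builder`] is set and the platform reports
/// that GPU preprocessing is available (see `finish`).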
pub struct GpuMeshPreprocessPlugin {
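    /// Whether mesh instance data should be built on the GPU.
    ///
    /// If this is false, `finish` returns early and none of the GPU
    /// preprocessing machinery is set up.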
    pub use_gpu_instance_buffer_builder: bool,
}

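/// The render node that zeroes out the GPU-side indirect parameters metadata
/// buffers at the start of each frame, before the preprocessing passes write
/// fresh instance counts into them.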
#[derive(Default)]
pub struct ClearIndirectParametersMetadataNode;

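/// The render node for the first ("early") mesh preprocessing compute pass.
///
/// This pass transforms meshes and, where enabled, performs GPU frustum
/// culling and the early phase of two-phase occlusion culling. It processes
/// the main view plus any shadow views listed in its [`ViewLightEntities`].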
pub struct EarlyGpuPreprocessNode {
    view_query: QueryState<
        (
            Read<ExtractedView>,
            Option<Read<PreprocessBindGroups>>,
            Option<Read<ViewUniformOffset>>,
            Has<NoIndirectDrawing>,
            Has<OcclusionCulling>,
        ),
        Without<SkipGpuPreprocess>,
    >,
    main_view_query: QueryState<Read<ViewLightEntities>>,
}

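/// The render node for the second ("late") mesh preprocessing pass of
/// two-phase occlusion culling.
///
/// This pass re-tests meshes that the early pass culled, so its dispatch
/// sizes come from GPU-written indirect parameters rather than from the CPU.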
pub struct LateGpuPreprocessNode {
    view_query: QueryState<
        (
            Read<ExtractedView>,
            Read<PreprocessBindGroups>,
            Read<ViewUniformOffset>,
        ),
        (
            Without<SkipGpuPreprocess>,
            Without<NoIndirectDrawing>,
            With<OcclusionCulling>,
            With<DepthPrepass>,
        ),
    >,
}

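/// The render node that builds indirect draw parameters for the early
/// prepass, covering views with a depth prepass or shadow views.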
pub struct EarlyPrepassBuildIndirectParametersNode {
    view_query: QueryState<
        Read<PreprocessBindGroups>,
        (
            Without<SkipGpuPreprocess>,
            Without<NoIndirectDrawing>,
            Or<(With<DepthPrepass>, With<ShadowView>)>,
        ),
    >,
}

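/// The render node that builds indirect draw parameters for the late prepass,
/// for views that have occlusion culling enabled.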
pub struct LatePrepassBuildIndirectParametersNode {
    view_query: QueryState<
        Read<PreprocessBindGroups>,
        (
            Without<SkipGpuPreprocess>,
            Without<NoIndirectDrawing>,
            Or<(With<DepthPrepass>, With<ShadowView>)>,
            With<OcclusionCulling>,
        ),
    >,
}

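/// The render node that builds indirect draw parameters for the main pass.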
pub struct MainBuildIndirectParametersNode {
    view_query: QueryState<
        Read<PreprocessBindGroups>,
        (Without<SkipGpuPreprocess>, Without<NoIndirectDrawing>),
    >,
}

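/// The compute pipelines used to preprocess meshes and build indirect draw
/// parameters: one preprocessing variant per culling mode, plus per-phase
/// pipeline sets for the early, late, and main phases.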
#[derive(Resource)]
pub struct PreprocessPipelines {
    pub direct_preprocess: PreprocessPipeline,
    pub gpu_frustum_culling_preprocess: PreprocessPipeline,
    pub early_gpu_occlusion_culling_preprocess: PreprocessPipeline,
    pub late_gpu_occlusion_culling_preprocess: PreprocessPipeline,
    pub gpu_frustum_culling_build_indexed_indirect_params: BuildIndirectParametersPipeline,
    pub gpu_frustum_culling_build_non_indexed_indirect_params: BuildIndirectParametersPipeline,
    pub early_phase: PreprocessPhasePipelines,
    pub late_phase: PreprocessPhasePipelines,
    pub main_phase: PreprocessPhasePipelines,
}

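/// The reset-batch-sets and build-indirect-parameters pipelines that are
/// specialized per phase (early, late, main).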
#[derive(Clone)]
pub struct PreprocessPhasePipelines {
    pub reset_indirect_batch_sets: ResetIndirectBatchSetsPipeline,
    pub gpu_occlusion_culling_build_indexed_indirect_params: BuildIndirectParametersPipeline,
    pub gpu_occlusion_culling_build_non_indexed_indirect_params: BuildIndirectParametersPipeline,
}

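/// One specialization of the mesh preprocessing compute pipeline, with its
/// bind group layout and, once compiled, its cached pipeline ID.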
pub struct PreprocessPipeline {
    pub bind_group_layout: BindGroupLayoutDescriptor,
    pub shader: Handle<Shader>,
    pub pipeline_id: Option<CachedComputePipelineId>,
}

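/// The compute pipeline that resets the [`IndirectBatchSet`] buffer each
/// frame, before the build-indirect-parameters passes run.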
#[derive(Clone)]
pub struct ResetIndirectBatchSetsPipeline {
    pub bind_group_layout: BindGroupLayoutDescriptor,
    pub shader: Handle<Shader>,
    pub pipeline_id: Option<CachedComputePipelineId>,
}

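/// One specialization of the compute pipeline that turns preprocessing
/// metadata into indirect draw parameters.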
#[derive(Clone)]
pub struct BuildIndirectParametersPipeline {
    pub bind_group_layout: BindGroupLayoutDescriptor,
    pub shader: Handle<Shader>,
    pub pipeline_id: Option<CachedComputePipelineId>,
}

bitflags! {
    #[derive(Clone, Copy, PartialEq, Eq, Hash)]
    pub struct PreprocessPipelineKey: u8 {
        const FRUSTUM_CULLING = 1;
        const OCCLUSION_CULLING = 2;
        const EARLY_PHASE = 4;
    }

    #[derive(Clone, Copy, PartialEq, Eq, Hash)]
    pub struct BuildIndirectParametersPipelineKey: u8 {
        const INDEXED = 1;
        const MULTI_DRAW_INDIRECT_COUNT_SUPPORTED = 2;
        const OCCLUSION_CULLING = 4;
        const EARLY_PHASE = 8;
        const LATE_PHASE = 16;
        const MAIN_PHASE = 32;
    }
}

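/// The bind groups for the mesh preprocessing compute passes, stored on each
/// view and keyed by the `TypeId` of the render phase.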
#[derive(Component, Clone, Deref, DerefMut)]
pub struct PreprocessBindGroups(pub TypeIdMap<PhasePreprocessBindGroups>);

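/// One phase's preprocessing bind groups: a single bind group for direct
/// drawing, indexed/non-indexed pairs for GPU frustum culling, and separate
/// early and late pairs for two-phase occlusion culling.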
#[derive(Clone)]
pub enum PhasePreprocessBindGroups {
    Direct(BindGroup),

    IndirectFrustumCulling {
        indexed: Option<BindGroup>,
        non_indexed: Option<BindGroup>,
    },

    IndirectOcclusionCulling {
        early_indexed: Option<BindGroup>,
        early_non_indexed: Option<BindGroup>,
        late_indexed: Option<BindGroup>,
        late_non_indexed: Option<BindGroup>,
    },
}

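/// The bind groups for the passes that build indirect draw parameters, keyed
/// by the `TypeId` of the render phase.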
#[derive(Resource, Default, Deref, DerefMut)]
pub struct BuildIndirectParametersBindGroups(pub TypeIdMap<PhaseBuildIndirectParametersBindGroups>);

impl BuildIndirectParametersBindGroups {
    pub fn new() -> BuildIndirectParametersBindGroups {
        Self::default()
    }
}

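/// The bind groups for one phase's reset-batch-sets and build-indirect
/// passes, split into indexed and non-indexed variants.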
pub struct PhaseBuildIndirectParametersBindGroups {
    reset_indexed_indirect_batch_sets: Option<BindGroup>,
    reset_non_indexed_indirect_batch_sets: Option<BindGroup>,
    build_indexed_indirect: Option<BindGroup>,
    build_non_indexed_indirect: Option<BindGroup>,
}

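/// A marker component that excludes a view from all of the GPU preprocessing
/// passes in this module.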
#[derive(Component, Default)]
pub struct SkipGpuPreprocess;

impl Plugin for GpuMeshPreprocessPlugin {
    fn build(&self, app: &mut App) {
        embedded_asset!(app, "mesh_preprocess.wgsl");
        embedded_asset!(app, "reset_indirect_batch_sets.wgsl");
        embedded_asset!(app, "build_indirect_params.wgsl");
    }

    fn finish(&self, app: &mut App) {
        let Some(render_app) = app.get_sub_app_mut(RenderApp) else {
            return;
        };

        let gpu_preprocessing_support = render_app.world().resource::<GpuPreprocessingSupport>();
        if !self.use_gpu_instance_buffer_builder || !gpu_preprocessing_support.is_available() {
            return;
        }

        render_app
            .init_resource::<PreprocessPipelines>()
            .init_resource::<SpecializedComputePipelines<PreprocessPipeline>>()
            .init_resource::<SpecializedComputePipelines<ResetIndirectBatchSetsPipeline>>()
            .init_resource::<SpecializedComputePipelines<BuildIndirectParametersPipeline>>()
            .add_systems(
                Render,
                (
                    prepare_preprocess_pipelines.in_set(RenderSystems::Prepare),
                    prepare_preprocess_bind_groups
                        .run_if(resource_exists::<BatchedInstanceBuffers<
                            MeshUniform,
                            MeshInputUniform,
                        >>)
                        .in_set(RenderSystems::PrepareBindGroups),
                    write_mesh_culling_data_buffer.in_set(RenderSystems::PrepareResourcesFlush),
                ),
            )
            .add_render_graph_node::<ClearIndirectParametersMetadataNode>(
                Core3d,
                NodePbr::ClearIndirectParametersMetadata,
            )
            .add_render_graph_node::<EarlyGpuPreprocessNode>(Core3d, NodePbr::EarlyGpuPreprocess)
            .add_render_graph_node::<LateGpuPreprocessNode>(Core3d, NodePbr::LateGpuPreprocess)
            .add_render_graph_node::<EarlyPrepassBuildIndirectParametersNode>(
                Core3d,
                NodePbr::EarlyPrepassBuildIndirectParameters,
            )
            .add_render_graph_node::<LatePrepassBuildIndirectParametersNode>(
                Core3d,
                NodePbr::LatePrepassBuildIndirectParameters,
            )
            .add_render_graph_node::<MainBuildIndirectParametersNode>(
                Core3d,
                NodePbr::MainBuildIndirectParameters,
            )
            .add_render_graph_edges(
                Core3d,
                (
                    NodePbr::ClearIndirectParametersMetadata,
                    NodePbr::EarlyGpuPreprocess,
                    NodePbr::EarlyPrepassBuildIndirectParameters,
                    Node3d::EarlyPrepass,
                    Node3d::EarlyDeferredPrepass,
                    Node3d::EarlyDownsampleDepth,
                    NodePbr::LateGpuPreprocess,
                    NodePbr::LatePrepassBuildIndirectParameters,
                    Node3d::LatePrepass,
                    Node3d::LateDeferredPrepass,
                    NodePbr::MainBuildIndirectParameters,
                    Node3d::StartMainPass,
                ),
            )
            .add_render_graph_edges(
                Core3d,
                (
                    NodePbr::EarlyPrepassBuildIndirectParameters,
                    NodePbr::EarlyShadowPass,
                    Node3d::EarlyDownsampleDepth,
                ),
            )
            .add_render_graph_edges(
                Core3d,
                (
                    NodePbr::LatePrepassBuildIndirectParameters,
                    NodePbr::LateShadowPass,
                    NodePbr::MainBuildIndirectParameters,
                ),
            );
    }
}

impl Node for ClearIndirectParametersMetadataNode {
    fn run<'w>(
        &self,
        _: &mut RenderGraphContext,
        render_context: &mut RenderContext<'w>,
        world: &'w World,
    ) -> Result<(), NodeRunError> {
        let Some(indirect_parameters_buffers) = world.get_resource::<IndirectParametersBuffers>()
        else {
            return Ok(());
        };

        for phase_indirect_parameters_buffers in indirect_parameters_buffers.values() {
            if let Some(indexed_gpu_metadata_buffer) = phase_indirect_parameters_buffers
                .indexed
                .gpu_metadata_buffer()
            {
                render_context.command_encoder().clear_buffer(
                    indexed_gpu_metadata_buffer,
                    0,
                    Some(
                        phase_indirect_parameters_buffers.indexed.batch_count() as u64
                            * size_of::<IndirectParametersGpuMetadata>() as u64,
                    ),
                );
            }

            if let Some(non_indexed_gpu_metadata_buffer) = phase_indirect_parameters_buffers
                .non_indexed
                .gpu_metadata_buffer()
            {
                render_context.command_encoder().clear_buffer(
                    non_indexed_gpu_metadata_buffer,
                    0,
                    Some(
                        phase_indirect_parameters_buffers.non_indexed.batch_count() as u64
                            * size_of::<IndirectParametersGpuMetadata>() as u64,
                    ),
                );
            }
        }

        Ok(())
    }
}

impl FromWorld for EarlyGpuPreprocessNode {
    fn from_world(world: &mut World) -> Self {
        Self {
            view_query: QueryState::new(world),
            main_view_query: QueryState::new(world),
        }
    }
}

impl Node for EarlyGpuPreprocessNode {
    fn update(&mut self, world: &mut World) {
        self.view_query.update_archetypes(world);
        self.main_view_query.update_archetypes(world);
    }

    fn run<'w>(
        &self,
        graph: &mut RenderGraphContext,
        render_context: &mut RenderContext<'w>,
        world: &'w World,
    ) -> Result<(), NodeRunError> {
        let diagnostics = render_context.diagnostic_recorder();

        let batched_instance_buffers =
            world.resource::<BatchedInstanceBuffers<MeshUniform, MeshInputUniform>>();

        let pipeline_cache = world.resource::<PipelineCache>();
        let preprocess_pipelines = world.resource::<PreprocessPipelines>();

        let mut compute_pass =
            render_context
                .command_encoder()
                .begin_compute_pass(&ComputePassDescriptor {
                    label: Some("early_mesh_preprocessing"),
                    timestamp_writes: None,
                });
        let pass_span = diagnostics.pass_span(&mut compute_pass, "early_mesh_preprocessing");

        let mut all_views: SmallVec<[_; 8]> = SmallVec::new();
        all_views.push(graph.view_entity());
        if let Ok(shadow_cascade_views) =
            self.main_view_query.get_manual(world, graph.view_entity())
        {
            all_views.extend(shadow_cascade_views.lights.iter().copied());
        }

        for view_entity in all_views {
            let Ok((
                view,
                bind_groups,
                view_uniform_offset,
                no_indirect_drawing,
                occlusion_culling,
            )) = self.view_query.get_manual(world, view_entity)
            else {
                continue;
            };

            let Some(bind_groups) = bind_groups else {
                continue;
            };
            let Some(view_uniform_offset) = view_uniform_offset else {
                continue;
            };

            let maybe_pipeline_id = if no_indirect_drawing {
                preprocess_pipelines.direct_preprocess.pipeline_id
            } else if occlusion_culling {
                preprocess_pipelines
                    .early_gpu_occlusion_culling_preprocess
                    .pipeline_id
            } else {
                preprocess_pipelines
                    .gpu_frustum_culling_preprocess
                    .pipeline_id
            };

            let Some(preprocess_pipeline_id) = maybe_pipeline_id else {
                warn!("The build mesh uniforms pipeline wasn't ready");
                continue;
            };

            let Some(preprocess_pipeline) =
                pipeline_cache.get_compute_pipeline(preprocess_pipeline_id)
            else {
                continue;
            };

            compute_pass.set_pipeline(preprocess_pipeline);

            for (phase_type_id, batched_phase_instance_buffers) in
                &batched_instance_buffers.phase_instance_buffers
            {
                let Some(work_item_buffers) = batched_phase_instance_buffers
                    .work_item_buffers
                    .get(&view.retained_view_entity)
                else {
                    continue;
                };

                let Some(phase_bind_groups) = bind_groups.get(phase_type_id) else {
                    continue;
                };

                let dynamic_offsets = [view_uniform_offset.offset];

                match *phase_bind_groups {
                    PhasePreprocessBindGroups::Direct(ref bind_group) => {
                        let PreprocessWorkItemBuffers::Direct(work_item_buffer) = work_item_buffers
                        else {
                            continue;
                        };
                        compute_pass.set_bind_group(0, bind_group, &dynamic_offsets);
                        let workgroup_count = work_item_buffer.len().div_ceil(WORKGROUP_SIZE);
                        if workgroup_count > 0 {
                            compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1);
                        }
                    }

                    PhasePreprocessBindGroups::IndirectFrustumCulling {
                        indexed: ref maybe_indexed_bind_group,
                        non_indexed: ref maybe_non_indexed_bind_group,
                    }
                    | PhasePreprocessBindGroups::IndirectOcclusionCulling {
                        early_indexed: ref maybe_indexed_bind_group,
                        early_non_indexed: ref maybe_non_indexed_bind_group,
                        ..
                    } => {
                        let PreprocessWorkItemBuffers::Indirect {
                            indexed: indexed_buffer,
                            non_indexed: non_indexed_buffer,
                            ..
                        } = work_item_buffers
                        else {
                            continue;
                        };

                        if let Some(indexed_bind_group) = maybe_indexed_bind_group {
                            if let PreprocessWorkItemBuffers::Indirect {
                                gpu_occlusion_culling:
                                    Some(GpuOcclusionCullingWorkItemBuffers {
                                        late_indirect_parameters_indexed_offset,
                                        ..
                                    }),
                                ..
                            } = *work_item_buffers
                            {
                                compute_pass.set_push_constants(
                                    0,
                                    bytemuck::bytes_of(&late_indirect_parameters_indexed_offset),
                                );
                            }

                            compute_pass.set_bind_group(0, indexed_bind_group, &dynamic_offsets);
                            let workgroup_count = indexed_buffer.len().div_ceil(WORKGROUP_SIZE);
                            if workgroup_count > 0 {
                                compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1);
                            }
                        }

                        if let Some(non_indexed_bind_group) = maybe_non_indexed_bind_group {
                            if let PreprocessWorkItemBuffers::Indirect {
                                gpu_occlusion_culling:
                                    Some(GpuOcclusionCullingWorkItemBuffers {
                                        late_indirect_parameters_non_indexed_offset,
                                        ..
                                    }),
                                ..
                            } = *work_item_buffers
                            {
                                compute_pass.set_push_constants(
                                    0,
                                    bytemuck::bytes_of(
                                        &late_indirect_parameters_non_indexed_offset,
                                    ),
                                );
                            }

                            compute_pass.set_bind_group(
                                0,
                                non_indexed_bind_group,
                                &dynamic_offsets,
                            );
                            let workgroup_count = non_indexed_buffer.len().div_ceil(WORKGROUP_SIZE);
                            if workgroup_count > 0 {
                                compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1);
                            }
                        }
                    }
                }
            }
        }

        pass_span.end(&mut compute_pass);

        Ok(())
    }
}

impl FromWorld for EarlyPrepassBuildIndirectParametersNode {
    fn from_world(world: &mut World) -> Self {
        Self {
            view_query: QueryState::new(world),
        }
    }
}

impl FromWorld for LatePrepassBuildIndirectParametersNode {
    fn from_world(world: &mut World) -> Self {
        Self {
            view_query: QueryState::new(world),
        }
    }
}

impl FromWorld for MainBuildIndirectParametersNode {
    fn from_world(world: &mut World) -> Self {
        Self {
            view_query: QueryState::new(world),
        }
    }
}

impl FromWorld for LateGpuPreprocessNode {
    fn from_world(world: &mut World) -> Self {
        Self {
            view_query: QueryState::new(world),
        }
    }
}

impl Node for LateGpuPreprocessNode {
    fn update(&mut self, world: &mut World) {
        self.view_query.update_archetypes(world);
    }

    fn run<'w>(
        &self,
        _: &mut RenderGraphContext,
        render_context: &mut RenderContext<'w>,
        world: &'w World,
    ) -> Result<(), NodeRunError> {
        let diagnostics = render_context.diagnostic_recorder();

        let batched_instance_buffers =
            world.resource::<BatchedInstanceBuffers<MeshUniform, MeshInputUniform>>();

        let pipeline_cache = world.resource::<PipelineCache>();
        let preprocess_pipelines = world.resource::<PreprocessPipelines>();

        let maybe_pipeline_id = preprocess_pipelines
            .late_gpu_occlusion_culling_preprocess
            .pipeline_id;

        let Some(preprocess_pipeline_id) = maybe_pipeline_id else {
            warn_once!("The build mesh uniforms pipeline wasn't ready");
            return Ok(());
        };

        let Some(preprocess_pipeline) = pipeline_cache.get_compute_pipeline(preprocess_pipeline_id)
        else {
            return Ok(());
        };

        let mut compute_pass =
            render_context
                .command_encoder()
                .begin_compute_pass(&ComputePassDescriptor {
                    label: Some("late_mesh_preprocessing"),
                    timestamp_writes: None,
                });

        let pass_span = diagnostics.pass_span(&mut compute_pass, "late_mesh_preprocessing");

        for (view, bind_groups, view_uniform_offset) in self.view_query.iter_manual(world) {
            compute_pass.set_pipeline(preprocess_pipeline);

            for (phase_type_id, batched_phase_instance_buffers) in
                &batched_instance_buffers.phase_instance_buffers
            {
                let UntypedPhaseBatchedInstanceBuffers {
                    ref work_item_buffers,
                    ref late_indexed_indirect_parameters_buffer,
                    ref late_non_indexed_indirect_parameters_buffer,
                    ..
                } = *batched_phase_instance_buffers;

                let Some(phase_work_item_buffers) =
                    work_item_buffers.get(&view.retained_view_entity)
                else {
                    continue;
                };

                let (
                    PreprocessWorkItemBuffers::Indirect {
                        gpu_occlusion_culling:
                            Some(GpuOcclusionCullingWorkItemBuffers {
                                late_indirect_parameters_indexed_offset,
                                late_indirect_parameters_non_indexed_offset,
                                ..
                            }),
                        ..
                    },
                    Some(PhasePreprocessBindGroups::IndirectOcclusionCulling {
                        late_indexed: maybe_late_indexed_bind_group,
                        late_non_indexed: maybe_late_non_indexed_bind_group,
                        ..
                    }),
                    Some(late_indexed_indirect_parameters_buffer),
                    Some(late_non_indexed_indirect_parameters_buffer),
                ) = (
                    phase_work_item_buffers,
                    bind_groups.get(phase_type_id),
                    late_indexed_indirect_parameters_buffer.buffer(),
                    late_non_indexed_indirect_parameters_buffer.buffer(),
                )
                else {
                    continue;
                };

                let mut dynamic_offsets: SmallVec<[u32; 1]> = smallvec![];
                dynamic_offsets.push(view_uniform_offset.offset);

                if let Some(late_indexed_bind_group) = maybe_late_indexed_bind_group {
                    compute_pass.set_push_constants(
                        0,
                        bytemuck::bytes_of(late_indirect_parameters_indexed_offset),
                    );

                    compute_pass.set_bind_group(0, late_indexed_bind_group, &dynamic_offsets);
                    compute_pass.dispatch_workgroups_indirect(
                        late_indexed_indirect_parameters_buffer,
                        (*late_indirect_parameters_indexed_offset as u64)
                            * (size_of::<LatePreprocessWorkItemIndirectParameters>() as u64),
                    );
                }

                if let Some(late_non_indexed_bind_group) = maybe_late_non_indexed_bind_group {
                    compute_pass.set_push_constants(
                        0,
                        bytemuck::bytes_of(late_indirect_parameters_non_indexed_offset),
                    );

                    compute_pass.set_bind_group(0, late_non_indexed_bind_group, &dynamic_offsets);
                    compute_pass.dispatch_workgroups_indirect(
                        late_non_indexed_indirect_parameters_buffer,
                        (*late_indirect_parameters_non_indexed_offset as u64)
                            * (size_of::<LatePreprocessWorkItemIndirectParameters>() as u64),
                    );
                }
            }
        }

        pass_span.end(&mut compute_pass);

        Ok(())
    }
}

impl Node for EarlyPrepassBuildIndirectParametersNode {
    fn update(&mut self, world: &mut World) {
        self.view_query.update_archetypes(world);
    }

    fn run<'w>(
        &self,
        _: &mut RenderGraphContext,
        render_context: &mut RenderContext<'w>,
        world: &'w World,
    ) -> Result<(), NodeRunError> {
        let preprocess_pipelines = world.resource::<PreprocessPipelines>();

        if self.view_query.iter_manual(world).next().is_none() {
            return Ok(());
        }

        run_build_indirect_parameters_node(
            render_context,
            world,
            &preprocess_pipelines.early_phase,
            "early_prepass_indirect_parameters_building",
        )
    }
}

impl Node for LatePrepassBuildIndirectParametersNode {
    fn update(&mut self, world: &mut World) {
        self.view_query.update_archetypes(world);
    }

    fn run<'w>(
        &self,
        _: &mut RenderGraphContext,
        render_context: &mut RenderContext<'w>,
        world: &'w World,
    ) -> Result<(), NodeRunError> {
        let preprocess_pipelines = world.resource::<PreprocessPipelines>();

        if self.view_query.iter_manual(world).next().is_none() {
            return Ok(());
        }

        run_build_indirect_parameters_node(
            render_context,
            world,
            &preprocess_pipelines.late_phase,
            "late_prepass_indirect_parameters_building",
        )
    }
}

impl Node for MainBuildIndirectParametersNode {
    fn update(&mut self, world: &mut World) {
        self.view_query.update_archetypes(world);
    }

    fn run<'w>(
        &self,
        _: &mut RenderGraphContext,
        render_context: &mut RenderContext<'w>,
        world: &'w World,
    ) -> Result<(), NodeRunError> {
        let preprocess_pipelines = world.resource::<PreprocessPipelines>();

        run_build_indirect_parameters_node(
            render_context,
            world,
            &preprocess_pipelines.main_phase,
            "main_indirect_parameters_building",
        )
    }
}

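/// The shared body of the three build-indirect-parameters nodes: for each
/// phase, it resets the indirect batch sets and then builds the indexed and
/// non-indexed indirect parameters, using the pipelines supplied for that
/// phase.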
fn run_build_indirect_parameters_node(
    render_context: &mut RenderContext,
    world: &World,
    preprocess_phase_pipelines: &PreprocessPhasePipelines,
    label: &'static str,
) -> Result<(), NodeRunError> {
    let Some(build_indirect_params_bind_groups) =
        world.get_resource::<BuildIndirectParametersBindGroups>()
    else {
        return Ok(());
    };

    let diagnostics = render_context.diagnostic_recorder();

    let pipeline_cache = world.resource::<PipelineCache>();
    let indirect_parameters_buffers = world.resource::<IndirectParametersBuffers>();

    let mut compute_pass =
        render_context
            .command_encoder()
            .begin_compute_pass(&ComputePassDescriptor {
                label: Some(label),
                timestamp_writes: None,
            });
    let pass_span = diagnostics.pass_span(&mut compute_pass, label);

    let (
        Some(reset_indirect_batch_sets_pipeline_id),
        Some(build_indexed_indirect_params_pipeline_id),
        Some(build_non_indexed_indirect_params_pipeline_id),
    ) = (
        preprocess_phase_pipelines
            .reset_indirect_batch_sets
            .pipeline_id,
        preprocess_phase_pipelines
            .gpu_occlusion_culling_build_indexed_indirect_params
            .pipeline_id,
        preprocess_phase_pipelines
            .gpu_occlusion_culling_build_non_indexed_indirect_params
            .pipeline_id,
    )
    else {
        warn!("The build indirect parameters pipelines weren't ready");
        pass_span.end(&mut compute_pass);
        return Ok(());
    };

    let (
        Some(reset_indirect_batch_sets_pipeline),
        Some(build_indexed_indirect_params_pipeline),
        Some(build_non_indexed_indirect_params_pipeline),
    ) = (
        pipeline_cache.get_compute_pipeline(reset_indirect_batch_sets_pipeline_id),
        pipeline_cache.get_compute_pipeline(build_indexed_indirect_params_pipeline_id),
        pipeline_cache.get_compute_pipeline(build_non_indexed_indirect_params_pipeline_id),
    )
    else {
        pass_span.end(&mut compute_pass);
        return Ok(());
    };

    for (phase_type_id, phase_build_indirect_params_bind_groups) in
        build_indirect_params_bind_groups.iter()
    {
        let Some(phase_indirect_parameters_buffers) =
            indirect_parameters_buffers.get(phase_type_id)
        else {
            continue;
        };

        if let (
            Some(reset_indexed_indirect_batch_sets_bind_group),
            Some(build_indirect_indexed_params_bind_group),
        ) = (
            &phase_build_indirect_params_bind_groups.reset_indexed_indirect_batch_sets,
            &phase_build_indirect_params_bind_groups.build_indexed_indirect,
        ) {
            compute_pass.set_pipeline(reset_indirect_batch_sets_pipeline);
            compute_pass.set_bind_group(0, reset_indexed_indirect_batch_sets_bind_group, &[]);
            let workgroup_count = phase_indirect_parameters_buffers
                .batch_set_count(true)
                .div_ceil(WORKGROUP_SIZE);
            if workgroup_count > 0 {
                compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1);
            }

            compute_pass.set_pipeline(build_indexed_indirect_params_pipeline);
            compute_pass.set_bind_group(0, build_indirect_indexed_params_bind_group, &[]);
            let workgroup_count = phase_indirect_parameters_buffers
                .indexed
                .batch_count()
                .div_ceil(WORKGROUP_SIZE);
            if workgroup_count > 0 {
                compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1);
            }
        }

        if let (
            Some(reset_non_indexed_indirect_batch_sets_bind_group),
            Some(build_indirect_non_indexed_params_bind_group),
        ) = (
            &phase_build_indirect_params_bind_groups.reset_non_indexed_indirect_batch_sets,
            &phase_build_indirect_params_bind_groups.build_non_indexed_indirect,
        ) {
            compute_pass.set_pipeline(reset_indirect_batch_sets_pipeline);
            compute_pass.set_bind_group(0, reset_non_indexed_indirect_batch_sets_bind_group, &[]);
            let workgroup_count = phase_indirect_parameters_buffers
                .batch_set_count(false)
                .div_ceil(WORKGROUP_SIZE);
            if workgroup_count > 0 {
                compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1);
            }

            compute_pass.set_pipeline(build_non_indexed_indirect_params_pipeline);
            compute_pass.set_bind_group(0, build_indirect_non_indexed_params_bind_group, &[]);
            let workgroup_count = phase_indirect_parameters_buffers
                .non_indexed
                .batch_count()
                .div_ceil(WORKGROUP_SIZE);
            if workgroup_count > 0 {
                compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1);
            }
        }
    }

    pass_span.end(&mut compute_pass);

    Ok(())
}

impl PreprocessPipelines {
    pub(crate) fn pipelines_are_loaded(
        &self,
        pipeline_cache: &PipelineCache,
        preprocessing_support: &GpuPreprocessingSupport,
    ) -> bool {
        match preprocessing_support.max_supported_mode {
            GpuPreprocessingMode::None => false,
            GpuPreprocessingMode::PreprocessingOnly => {
                self.direct_preprocess.is_loaded(pipeline_cache)
                    && self
                        .gpu_frustum_culling_preprocess
                        .is_loaded(pipeline_cache)
            }
            GpuPreprocessingMode::Culling => {
                self.direct_preprocess.is_loaded(pipeline_cache)
                    && self
                        .gpu_frustum_culling_preprocess
                        .is_loaded(pipeline_cache)
                    && self
                        .early_gpu_occlusion_culling_preprocess
                        .is_loaded(pipeline_cache)
                    && self
                        .late_gpu_occlusion_culling_preprocess
                        .is_loaded(pipeline_cache)
                    && self
                        .gpu_frustum_culling_build_indexed_indirect_params
                        .is_loaded(pipeline_cache)
                    && self
                        .gpu_frustum_culling_build_non_indexed_indirect_params
                        .is_loaded(pipeline_cache)
                    && self.early_phase.is_loaded(pipeline_cache)
                    && self.late_phase.is_loaded(pipeline_cache)
                    && self.main_phase.is_loaded(pipeline_cache)
            }
        }
    }
}

impl PreprocessPhasePipelines {
    fn is_loaded(&self, pipeline_cache: &PipelineCache) -> bool {
        self.reset_indirect_batch_sets.is_loaded(pipeline_cache)
            && self
                .gpu_occlusion_culling_build_indexed_indirect_params
                .is_loaded(pipeline_cache)
            && self
                .gpu_occlusion_culling_build_non_indexed_indirect_params
                .is_loaded(pipeline_cache)
    }
}

impl PreprocessPipeline {
    fn is_loaded(&self, pipeline_cache: &PipelineCache) -> bool {
        self.pipeline_id
            .is_some_and(|pipeline_id| pipeline_cache.get_compute_pipeline(pipeline_id).is_some())
    }
}

impl ResetIndirectBatchSetsPipeline {
    fn is_loaded(&self, pipeline_cache: &PipelineCache) -> bool {
        self.pipeline_id
            .is_some_and(|pipeline_id| pipeline_cache.get_compute_pipeline(pipeline_id).is_some())
    }
}

impl BuildIndirectParametersPipeline {
    fn is_loaded(&self, pipeline_cache: &PipelineCache) -> bool {
        self.pipeline_id
            .is_some_and(|pipeline_id| pipeline_cache.get_compute_pipeline(pipeline_id).is_some())
    }
}

impl SpecializedComputePipeline for PreprocessPipeline {
    type Key = PreprocessPipelineKey;

    fn specialize(&self, key: Self::Key) -> ComputePipelineDescriptor {
        let mut shader_defs = vec!["WRITE_INDIRECT_PARAMETERS_METADATA".into()];
        if key.contains(PreprocessPipelineKey::FRUSTUM_CULLING) {
            shader_defs.push("INDIRECT".into());
            shader_defs.push("FRUSTUM_CULLING".into());
        }
        if key.contains(PreprocessPipelineKey::OCCLUSION_CULLING) {
            shader_defs.push("OCCLUSION_CULLING".into());
            if key.contains(PreprocessPipelineKey::EARLY_PHASE) {
                shader_defs.push("EARLY_PHASE".into());
            } else {
                shader_defs.push("LATE_PHASE".into());
            }
        }

        ComputePipelineDescriptor {
            label: Some(
                format!(
                    "mesh preprocessing ({})",
                    if key.contains(
                        PreprocessPipelineKey::OCCLUSION_CULLING
                            | PreprocessPipelineKey::EARLY_PHASE
                    ) {
                        "early GPU occlusion culling"
                    } else if key.contains(PreprocessPipelineKey::OCCLUSION_CULLING) {
                        "late GPU occlusion culling"
                    } else if key.contains(PreprocessPipelineKey::FRUSTUM_CULLING) {
                        "GPU frustum culling"
                    } else {
                        "direct"
                    }
                )
                .into(),
            ),
            layout: vec![self.bind_group_layout.clone()],
            push_constant_ranges: if key.contains(PreprocessPipelineKey::OCCLUSION_CULLING) {
                vec![PushConstantRange {
                    stages: ShaderStages::COMPUTE,
                    range: 0..4,
                }]
            } else {
                vec![]
            },
            shader: self.shader.clone(),
            shader_defs,
            ..default()
        }
    }
}

impl FromWorld for PreprocessPipelines {
    fn from_world(world: &mut World) -> Self {
        let direct_bind_group_layout_entries = preprocess_direct_bind_group_layout_entries();
        let gpu_frustum_culling_bind_group_layout_entries =
            gpu_culling_bind_group_layout_entries();
        let gpu_early_occlusion_culling_bind_group_layout_entries =
            gpu_occlusion_culling_bind_group_layout_entries().extend_with_indices(((
                11,
                storage_buffer::<PreprocessWorkItem>(false),
            ),));
        let gpu_late_occlusion_culling_bind_group_layout_entries =
            gpu_occlusion_culling_bind_group_layout_entries();

        let reset_indirect_batch_sets_bind_group_layout_entries =
            DynamicBindGroupLayoutEntries::sequential(
                ShaderStages::COMPUTE,
                (storage_buffer::<IndirectBatchSet>(false),),
            );

        let build_indexed_indirect_params_bind_group_layout_entries =
            build_indirect_params_bind_group_layout_entries()
                .extend_sequential((storage_buffer::<IndirectParametersIndexed>(false),));
        let build_non_indexed_indirect_params_bind_group_layout_entries =
            build_indirect_params_bind_group_layout_entries()
                .extend_sequential((storage_buffer::<IndirectParametersNonIndexed>(false),));

        let direct_bind_group_layout = BindGroupLayoutDescriptor::new(
            "build mesh uniforms direct bind group layout",
            &direct_bind_group_layout_entries,
        );
        let gpu_frustum_culling_bind_group_layout = BindGroupLayoutDescriptor::new(
            "build mesh uniforms GPU frustum culling bind group layout",
            &gpu_frustum_culling_bind_group_layout_entries,
        );
        let gpu_early_occlusion_culling_bind_group_layout = BindGroupLayoutDescriptor::new(
            "build mesh uniforms GPU early occlusion culling bind group layout",
            &gpu_early_occlusion_culling_bind_group_layout_entries,
        );
        let gpu_late_occlusion_culling_bind_group_layout = BindGroupLayoutDescriptor::new(
            "build mesh uniforms GPU late occlusion culling bind group layout",
            &gpu_late_occlusion_culling_bind_group_layout_entries,
        );
        let reset_indirect_batch_sets_bind_group_layout = BindGroupLayoutDescriptor::new(
            "reset indirect batch sets bind group layout",
            &reset_indirect_batch_sets_bind_group_layout_entries,
        );
        let build_indexed_indirect_params_bind_group_layout = BindGroupLayoutDescriptor::new(
            "build indexed indirect parameters bind group layout",
            &build_indexed_indirect_params_bind_group_layout_entries,
        );
        let build_non_indexed_indirect_params_bind_group_layout = BindGroupLayoutDescriptor::new(
            "build non-indexed indirect parameters bind group layout",
            &build_non_indexed_indirect_params_bind_group_layout_entries,
        );

        let preprocess_shader = load_embedded_asset!(world, "mesh_preprocess.wgsl");
        let reset_indirect_batch_sets_shader =
            load_embedded_asset!(world, "reset_indirect_batch_sets.wgsl");
        let build_indirect_params_shader =
            load_embedded_asset!(world, "build_indirect_params.wgsl");

        let preprocess_phase_pipelines = PreprocessPhasePipelines {
            reset_indirect_batch_sets: ResetIndirectBatchSetsPipeline {
                bind_group_layout: reset_indirect_batch_sets_bind_group_layout.clone(),
                shader: reset_indirect_batch_sets_shader,
                pipeline_id: None,
            },
            gpu_occlusion_culling_build_indexed_indirect_params: BuildIndirectParametersPipeline {
                bind_group_layout: build_indexed_indirect_params_bind_group_layout.clone(),
                shader: build_indirect_params_shader.clone(),
                pipeline_id: None,
            },
            gpu_occlusion_culling_build_non_indexed_indirect_params:
                BuildIndirectParametersPipeline {
                    bind_group_layout: build_non_indexed_indirect_params_bind_group_layout.clone(),
                    shader: build_indirect_params_shader.clone(),
                    pipeline_id: None,
                },
        };

        PreprocessPipelines {
            direct_preprocess: PreprocessPipeline {
                bind_group_layout: direct_bind_group_layout,
                shader: preprocess_shader.clone(),
                pipeline_id: None,
            },
            gpu_frustum_culling_preprocess: PreprocessPipeline {
                bind_group_layout: gpu_frustum_culling_bind_group_layout,
                shader: preprocess_shader.clone(),
                pipeline_id: None,
            },
            early_gpu_occlusion_culling_preprocess: PreprocessPipeline {
                bind_group_layout: gpu_early_occlusion_culling_bind_group_layout,
                shader: preprocess_shader.clone(),
                pipeline_id: None,
            },
            late_gpu_occlusion_culling_preprocess: PreprocessPipeline {
                bind_group_layout: gpu_late_occlusion_culling_bind_group_layout,
                shader: preprocess_shader,
                pipeline_id: None,
            },
            gpu_frustum_culling_build_indexed_indirect_params: BuildIndirectParametersPipeline {
                bind_group_layout: build_indexed_indirect_params_bind_group_layout.clone(),
                shader: build_indirect_params_shader.clone(),
                pipeline_id: None,
            },
            gpu_frustum_culling_build_non_indexed_indirect_params:
                BuildIndirectParametersPipeline {
                    bind_group_layout: build_non_indexed_indirect_params_bind_group_layout.clone(),
                    shader: build_indirect_params_shader,
                    pipeline_id: None,
                },
            early_phase: preprocess_phase_pipelines.clone(),
            late_phase: preprocess_phase_pipelines.clone(),
            main_phase: preprocess_phase_pipelines.clone(),
        }
    }
}

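/// The bind group layout entries common to every preprocessing variant: the
/// view uniform, the current and previous mesh input buffers, the work item
/// buffer, and the output mesh uniform buffer.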
fn preprocess_direct_bind_group_layout_entries() -> DynamicBindGroupLayoutEntries {
    DynamicBindGroupLayoutEntries::new_with_indices(
        ShaderStages::COMPUTE,
        (
            (0, uniform_buffer::<ViewUniform>(true)),
            (3, storage_buffer_read_only::<MeshInputUniform>(false)),
            (4, storage_buffer_read_only::<MeshInputUniform>(false)),
            (5, storage_buffer_read_only::<PreprocessWorkItem>(false)),
            (6, storage_buffer::<MeshUniform>(false)),
        ),
    )
}

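/// The bind group layout entries shared by the indexed and non-indexed
/// build-indirect-parameters pipelines; the output parameters buffer itself
/// is appended per variant by the caller.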
fn build_indirect_params_bind_group_layout_entries() -> DynamicBindGroupLayoutEntries {
    DynamicBindGroupLayoutEntries::new_with_indices(
        ShaderStages::COMPUTE,
        (
            (0, storage_buffer_read_only::<MeshInputUniform>(false)),
            (
                1,
                storage_buffer_read_only::<IndirectParametersCpuMetadata>(false),
            ),
            (
                2,
                storage_buffer_read_only::<IndirectParametersGpuMetadata>(false),
            ),
            (3, storage_buffer::<IndirectBatchSet>(false)),
        ),
    )
}

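/// Extends the direct layout with the indirect parameters metadata buffers
/// and the per-mesh culling data needed for GPU frustum culling.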
fn gpu_culling_bind_group_layout_entries() -> DynamicBindGroupLayoutEntries {
    preprocess_direct_bind_group_layout_entries().extend_with_indices((
        (
            7,
            storage_buffer_read_only::<IndirectParametersCpuMetadata>(false),
        ),
        (8, storage_buffer::<IndirectParametersGpuMetadata>(false)),
        (9, storage_buffer_read_only::<MeshCullingData>(false)),
    ))
}

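/// Extends the frustum culling layout with the previous view uniform, the
/// depth pyramid texture, and the late indirect dispatch parameters needed
/// for occlusion culling.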
fn gpu_occlusion_culling_bind_group_layout_entries() -> DynamicBindGroupLayoutEntries {
    gpu_culling_bind_group_layout_entries().extend_with_indices((
        (2, uniform_buffer::<PreviousViewData>(false)),
        (
            10,
            texture_2d(TextureSampleType::Float { filterable: true }),
        ),
        (
            12,
            storage_buffer::<LatePreprocessWorkItemIndirectParameters>(false),
        ),
    ))
}

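/// A system that specializes and queues compilation of every preprocessing
/// and indirect-parameters pipeline that the current GPU supports.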
pub fn prepare_preprocess_pipelines(
    pipeline_cache: Res<PipelineCache>,
    render_device: Res<RenderDevice>,
    mut specialized_preprocess_pipelines: ResMut<SpecializedComputePipelines<PreprocessPipeline>>,
    mut specialized_reset_indirect_batch_sets_pipelines: ResMut<
        SpecializedComputePipelines<ResetIndirectBatchSetsPipeline>,
    >,
    mut specialized_build_indirect_parameters_pipelines: ResMut<
        SpecializedComputePipelines<BuildIndirectParametersPipeline>,
    >,
    preprocess_pipelines: ResMut<PreprocessPipelines>,
    gpu_preprocessing_support: Res<GpuPreprocessingSupport>,
) {
    let preprocess_pipelines = preprocess_pipelines.into_inner();

    preprocess_pipelines.direct_preprocess.prepare(
        &pipeline_cache,
        &mut specialized_preprocess_pipelines,
        PreprocessPipelineKey::empty(),
    );
    preprocess_pipelines.gpu_frustum_culling_preprocess.prepare(
        &pipeline_cache,
        &mut specialized_preprocess_pipelines,
        PreprocessPipelineKey::FRUSTUM_CULLING,
    );

    if gpu_preprocessing_support.is_culling_supported() {
        preprocess_pipelines
            .early_gpu_occlusion_culling_preprocess
            .prepare(
                &pipeline_cache,
                &mut specialized_preprocess_pipelines,
                PreprocessPipelineKey::FRUSTUM_CULLING
                    | PreprocessPipelineKey::OCCLUSION_CULLING
                    | PreprocessPipelineKey::EARLY_PHASE,
            );
        preprocess_pipelines
            .late_gpu_occlusion_culling_preprocess
            .prepare(
                &pipeline_cache,
                &mut specialized_preprocess_pipelines,
                PreprocessPipelineKey::FRUSTUM_CULLING | PreprocessPipelineKey::OCCLUSION_CULLING,
            );
    }

    let mut build_indirect_parameters_pipeline_key = BuildIndirectParametersPipelineKey::empty();

    if render_device
        .wgpu_device()
        .features()
        .contains(WgpuFeatures::MULTI_DRAW_INDIRECT_COUNT)
    {
        build_indirect_parameters_pipeline_key
            .insert(BuildIndirectParametersPipelineKey::MULTI_DRAW_INDIRECT_COUNT_SUPPORTED);
    }

    preprocess_pipelines
        .gpu_frustum_culling_build_indexed_indirect_params
        .prepare(
            &pipeline_cache,
            &mut specialized_build_indirect_parameters_pipelines,
            build_indirect_parameters_pipeline_key | BuildIndirectParametersPipelineKey::INDEXED,
        );
    preprocess_pipelines
        .gpu_frustum_culling_build_non_indexed_indirect_params
        .prepare(
            &pipeline_cache,
            &mut specialized_build_indirect_parameters_pipelines,
            build_indirect_parameters_pipeline_key,
        );

    if !gpu_preprocessing_support.is_culling_supported() {
        return;
    }

    for (preprocess_phase_pipelines, build_indirect_parameters_phase_pipeline_key) in [
        (
            &mut preprocess_pipelines.early_phase,
            BuildIndirectParametersPipelineKey::EARLY_PHASE,
        ),
        (
            &mut preprocess_pipelines.late_phase,
            BuildIndirectParametersPipelineKey::LATE_PHASE,
        ),
        (
            &mut preprocess_pipelines.main_phase,
            BuildIndirectParametersPipelineKey::MAIN_PHASE,
        ),
    ] {
        preprocess_phase_pipelines
            .reset_indirect_batch_sets
            .prepare(
                &pipeline_cache,
                &mut specialized_reset_indirect_batch_sets_pipelines,
            );
        preprocess_phase_pipelines
            .gpu_occlusion_culling_build_indexed_indirect_params
            .prepare(
                &pipeline_cache,
                &mut specialized_build_indirect_parameters_pipelines,
                build_indirect_parameters_pipeline_key
                    | build_indirect_parameters_phase_pipeline_key
                    | BuildIndirectParametersPipelineKey::INDEXED
                    | BuildIndirectParametersPipelineKey::OCCLUSION_CULLING,
            );
        preprocess_phase_pipelines
            .gpu_occlusion_culling_build_non_indexed_indirect_params
            .prepare(
                &pipeline_cache,
                &mut specialized_build_indirect_parameters_pipelines,
                build_indirect_parameters_pipeline_key
                    | build_indirect_parameters_phase_pipeline_key
                    | BuildIndirectParametersPipelineKey::OCCLUSION_CULLING,
            );
    }
}

impl PreprocessPipeline {
    fn prepare(
        &mut self,
        pipeline_cache: &PipelineCache,
        pipelines: &mut SpecializedComputePipelines<PreprocessPipeline>,
        key: PreprocessPipelineKey,
    ) {
        if self.pipeline_id.is_some() {
            return;
        }

        let preprocess_pipeline_id = pipelines.specialize(pipeline_cache, self, key);
        self.pipeline_id = Some(preprocess_pipeline_id);
    }
}

impl SpecializedComputePipeline for ResetIndirectBatchSetsPipeline {
    type Key = ();

    fn specialize(&self, _: Self::Key) -> ComputePipelineDescriptor {
        ComputePipelineDescriptor {
            label: Some("reset indirect batch sets".into()),
            layout: vec![self.bind_group_layout.clone()],
            shader: self.shader.clone(),
            ..default()
        }
    }
}

impl SpecializedComputePipeline for BuildIndirectParametersPipeline {
    type Key = BuildIndirectParametersPipelineKey;

    fn specialize(&self, key: Self::Key) -> ComputePipelineDescriptor {
        let mut shader_defs = vec![];
        if key.contains(BuildIndirectParametersPipelineKey::INDEXED) {
            shader_defs.push("INDEXED".into());
        }
        if key.contains(BuildIndirectParametersPipelineKey::MULTI_DRAW_INDIRECT_COUNT_SUPPORTED) {
            shader_defs.push("MULTI_DRAW_INDIRECT_COUNT_SUPPORTED".into());
        }
        if key.contains(BuildIndirectParametersPipelineKey::OCCLUSION_CULLING) {
            shader_defs.push("OCCLUSION_CULLING".into());
        }
        if key.contains(BuildIndirectParametersPipelineKey::EARLY_PHASE) {
            shader_defs.push("EARLY_PHASE".into());
        }
        if key.contains(BuildIndirectParametersPipelineKey::LATE_PHASE) {
            shader_defs.push("LATE_PHASE".into());
        }
        if key.contains(BuildIndirectParametersPipelineKey::MAIN_PHASE) {
            shader_defs.push("MAIN_PHASE".into());
        }

        let label = format!(
            "{} build {}indexed indirect parameters",
            if !key.contains(BuildIndirectParametersPipelineKey::OCCLUSION_CULLING) {
                "frustum culling"
            } else if key.contains(BuildIndirectParametersPipelineKey::EARLY_PHASE) {
                "early occlusion culling"
            } else if key.contains(BuildIndirectParametersPipelineKey::LATE_PHASE) {
                "late occlusion culling"
            } else {
                "main occlusion culling"
            },
            if key.contains(BuildIndirectParametersPipelineKey::INDEXED) {
                ""
            } else {
                "non-"
            }
        );

        ComputePipelineDescriptor {
            label: Some(label.into()),
            layout: vec![self.bind_group_layout.clone()],
            shader: self.shader.clone(),
            shader_defs,
            ..default()
        }
    }
}

impl ResetIndirectBatchSetsPipeline {
    fn prepare(
        &mut self,
        pipeline_cache: &PipelineCache,
        pipelines: &mut SpecializedComputePipelines<ResetIndirectBatchSetsPipeline>,
    ) {
        if self.pipeline_id.is_some() {
            return;
        }

        let reset_indirect_batch_sets_pipeline_id = pipelines.specialize(pipeline_cache, self, ());
        self.pipeline_id = Some(reset_indirect_batch_sets_pipeline_id);
    }
}

impl BuildIndirectParametersPipeline {
    fn prepare(
        &mut self,
        pipeline_cache: &PipelineCache,
        pipelines: &mut SpecializedComputePipelines<BuildIndirectParametersPipeline>,
        key: BuildIndirectParametersPipelineKey,
    ) {
        if self.pipeline_id.is_some() {
            return;
        }

        let build_indirect_parameters_pipeline_id = pipelines.specialize(pipeline_cache, self, key);
        self.pipeline_id = Some(build_indirect_parameters_pipeline_id);
    }
}

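/// A system that creates, for every extracted view and render phase, the bind
/// groups for the preprocessing passes, and, when any view draws indirectly,
/// the bind groups for the build-indirect-parameters passes as well.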
#[expect(
    clippy::too_many_arguments,
    reason = "it's a system that needs a lot of arguments"
)]
pub fn prepare_preprocess_bind_groups(
    mut commands: Commands,
    views: Query<(Entity, &ExtractedView)>,
    view_depth_pyramids: Query<(&ViewDepthPyramid, &PreviousViewUniformOffset)>,
    render_device: Res<RenderDevice>,
    pipeline_cache: Res<PipelineCache>,
    batched_instance_buffers: Res<BatchedInstanceBuffers<MeshUniform, MeshInputUniform>>,
    indirect_parameters_buffers: Res<IndirectParametersBuffers>,
    mesh_culling_data_buffer: Res<MeshCullingDataBuffer>,
    view_uniforms: Res<ViewUniforms>,
    previous_view_uniforms: Res<PreviousViewUniforms>,
    pipelines: Res<PreprocessPipelines>,
) {
    let BatchedInstanceBuffers {
        current_input_buffer: current_input_buffer_vec,
        previous_input_buffer: previous_input_buffer_vec,
        phase_instance_buffers,
    } = batched_instance_buffers.into_inner();

    let (Some(current_input_buffer), Some(previous_input_buffer)) = (
        current_input_buffer_vec.buffer().buffer(),
        previous_input_buffer_vec.buffer().buffer(),
    ) else {
        return;
    };

    let mut any_indirect = false;

    for (view_entity, view) in &views {
        let mut bind_groups = TypeIdMap::default();

        for (phase_type_id, phase_instance_buffers) in phase_instance_buffers {
            let UntypedPhaseBatchedInstanceBuffers {
                data_buffer: ref data_buffer_vec,
                ref work_item_buffers,
                ref late_indexed_indirect_parameters_buffer,
                ref late_non_indexed_indirect_parameters_buffer,
            } = *phase_instance_buffers;

            let Some(data_buffer) = data_buffer_vec.buffer() else {
                continue;
            };

            let Some(phase_indirect_parameters_buffers) =
                indirect_parameters_buffers.get(phase_type_id)
            else {
                continue;
            };

            let Some(work_item_buffers) = work_item_buffers.get(&view.retained_view_entity) else {
                continue;
            };

            let preprocess_bind_group_builder = PreprocessBindGroupBuilder {
                view: view_entity,
                late_indexed_indirect_parameters_buffer,
                late_non_indexed_indirect_parameters_buffer,
                render_device: &render_device,
                pipeline_cache: &pipeline_cache,
                phase_indirect_parameters_buffers,
                mesh_culling_data_buffer: &mesh_culling_data_buffer,
                view_uniforms: &view_uniforms,
                previous_view_uniforms: &previous_view_uniforms,
                pipelines: &pipelines,
                current_input_buffer,
                previous_input_buffer,
                data_buffer,
            };

            let (was_indirect, bind_group) = match *work_item_buffers {
                PreprocessWorkItemBuffers::Direct(ref work_item_buffer) => (
                    false,
                    preprocess_bind_group_builder
                        .create_direct_preprocess_bind_groups(work_item_buffer),
                ),

                PreprocessWorkItemBuffers::Indirect {
                    indexed: ref indexed_work_item_buffer,
                    non_indexed: ref non_indexed_work_item_buffer,
                    gpu_occlusion_culling: Some(ref gpu_occlusion_culling_work_item_buffers),
                } => (
                    true,
                    preprocess_bind_group_builder
                        .create_indirect_occlusion_culling_preprocess_bind_groups(
                            &view_depth_pyramids,
                            indexed_work_item_buffer,
                            non_indexed_work_item_buffer,
                            gpu_occlusion_culling_work_item_buffers,
                        ),
                ),

                PreprocessWorkItemBuffers::Indirect {
                    indexed: ref indexed_work_item_buffer,
                    non_indexed: ref non_indexed_work_item_buffer,
                    gpu_occlusion_culling: None,
                } => (
                    true,
                    preprocess_bind_group_builder
                        .create_indirect_frustum_culling_preprocess_bind_groups(
                            indexed_work_item_buffer,
                            non_indexed_work_item_buffer,
                        ),
                ),
            };

            if let Some(bind_group) = bind_group {
                any_indirect = any_indirect || was_indirect;
                bind_groups.insert(*phase_type_id, bind_group);
            }
        }

        commands
            .entity(view_entity)
            .insert(PreprocessBindGroups(bind_groups));
    }

    if any_indirect {
        create_build_indirect_parameters_bind_groups(
            &mut commands,
            &render_device,
            &pipeline_cache,
            &pipelines,
            current_input_buffer,
            &indirect_parameters_buffers,
        );
    }
}

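/// A helper that gathers the buffers shared by one phase's preprocessing bind
/// groups so that the create methods below don't need long argument lists.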
struct PreprocessBindGroupBuilder<'a> {
    view: Entity,
    late_indexed_indirect_parameters_buffer:
        &'a RawBufferVec<LatePreprocessWorkItemIndirectParameters>,
    late_non_indexed_indirect_parameters_buffer:
        &'a RawBufferVec<LatePreprocessWorkItemIndirectParameters>,
    render_device: &'a RenderDevice,
    pipeline_cache: &'a PipelineCache,
    phase_indirect_parameters_buffers: &'a UntypedPhaseIndirectParametersBuffers,
    mesh_culling_data_buffer: &'a MeshCullingDataBuffer,
    view_uniforms: &'a ViewUniforms,
    previous_view_uniforms: &'a PreviousViewUniforms,
    pipelines: &'a PreprocessPipelines,
    current_input_buffer: &'a Buffer,
    previous_input_buffer: &'a Buffer,
    data_buffer: &'a Buffer,
}

impl<'a> PreprocessBindGroupBuilder<'a> {
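    /// Creates the bind group for the preprocessing pass when indirect
    /// drawing is disabled.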
    fn create_direct_preprocess_bind_groups(
        &self,
        work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
    ) -> Option<PhasePreprocessBindGroups> {
        let work_item_buffer_size = NonZero::<u64>::try_from(
            work_item_buffer.len() as u64 * u64::from(PreprocessWorkItem::min_size()),
        )
        .ok();

        Some(PhasePreprocessBindGroups::Direct(
            self.render_device.create_bind_group(
                "preprocess_direct_bind_group",
                &self
                    .pipeline_cache
                    .get_bind_group_layout(&self.pipelines.direct_preprocess.bind_group_layout),
                &BindGroupEntries::with_indices((
                    (0, self.view_uniforms.uniforms.binding()?),
                    (3, self.current_input_buffer.as_entire_binding()),
                    (4, self.previous_input_buffer.as_entire_binding()),
                    (
                        5,
                        BindingResource::Buffer(BufferBinding {
                            buffer: work_item_buffer.buffer()?,
                            offset: 0,
                            size: work_item_buffer_size,
                        }),
                    ),
                    (6, self.data_buffer.as_entire_binding()),
                )),
            ),
        ))
    }

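    /// Creates the four bind groups (early/late, indexed/non-indexed) used
    /// when two-phase occlusion culling is enabled; returns `None` if the
    /// view has no depth pyramid.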
    fn create_indirect_occlusion_culling_preprocess_bind_groups(
        &self,
        view_depth_pyramids: &Query<(&ViewDepthPyramid, &PreviousViewUniformOffset)>,
        indexed_work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
        non_indexed_work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
        gpu_occlusion_culling_work_item_buffers: &GpuOcclusionCullingWorkItemBuffers,
    ) -> Option<PhasePreprocessBindGroups> {
        let GpuOcclusionCullingWorkItemBuffers {
            late_indexed: ref late_indexed_work_item_buffer,
            late_non_indexed: ref late_non_indexed_work_item_buffer,
            ..
        } = *gpu_occlusion_culling_work_item_buffers;

        let (view_depth_pyramid, previous_view_uniform_offset) =
            view_depth_pyramids.get(self.view).ok()?;

        Some(PhasePreprocessBindGroups::IndirectOcclusionCulling {
            early_indexed: self.create_indirect_occlusion_culling_early_indexed_bind_group(
                view_depth_pyramid,
                previous_view_uniform_offset,
                indexed_work_item_buffer,
                late_indexed_work_item_buffer,
            ),

            early_non_indexed: self.create_indirect_occlusion_culling_early_non_indexed_bind_group(
                view_depth_pyramid,
                previous_view_uniform_offset,
                non_indexed_work_item_buffer,
                late_non_indexed_work_item_buffer,
            ),

            late_indexed: self.create_indirect_occlusion_culling_late_indexed_bind_group(
                view_depth_pyramid,
                previous_view_uniform_offset,
                late_indexed_work_item_buffer,
            ),

            late_non_indexed: self.create_indirect_occlusion_culling_late_non_indexed_bind_group(
                view_depth_pyramid,
                previous_view_uniform_offset,
                late_non_indexed_work_item_buffer,
            ),
        })
    }

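    /// Creates the bind group for the early occlusion-culling pass over
    /// meshes drawn with index buffers, returning `None` if any required
    /// buffer hasn't been allocated yet.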
2016 fn create_indirect_occlusion_culling_early_indexed_bind_group(
2019 &self,
2020 view_depth_pyramid: &ViewDepthPyramid,
2021 previous_view_uniform_offset: &PreviousViewUniformOffset,
2022 indexed_work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
2023 late_indexed_work_item_buffer: &UninitBufferVec<PreprocessWorkItem>,
2024 ) -> Option<BindGroup> {
2025 let mesh_culling_data_buffer = self.mesh_culling_data_buffer.buffer()?;
2026 let view_uniforms_binding = self.view_uniforms.uniforms.binding()?;
2027 let previous_view_buffer = self.previous_view_uniforms.uniforms.buffer()?;
2028
2029 match (
2030 self.phase_indirect_parameters_buffers
2031 .indexed
2032 .cpu_metadata_buffer(),
2033 self.phase_indirect_parameters_buffers
2034 .indexed
2035 .gpu_metadata_buffer(),
2036 indexed_work_item_buffer.buffer(),
2037 late_indexed_work_item_buffer.buffer(),
2038 self.late_indexed_indirect_parameters_buffer.buffer(),
2039 ) {
2040 (
2041 Some(indexed_cpu_metadata_buffer),
2042 Some(indexed_gpu_metadata_buffer),
2043 Some(indexed_work_item_gpu_buffer),
2044 Some(late_indexed_work_item_gpu_buffer),
2045 Some(late_indexed_indirect_parameters_buffer),
2046 ) => {
                let indexed_work_item_buffer_size = NonZero::<u64>::try_from(
                    indexed_work_item_buffer.len() as u64
                        * u64::from(PreprocessWorkItem::min_size()),
                )
                .ok();

                Some(
                    self.render_device.create_bind_group(
                        "preprocess_early_indexed_gpu_occlusion_culling_bind_group",
                        &self.pipeline_cache.get_bind_group_layout(
                            &self
                                .pipelines
                                .early_gpu_occlusion_culling_preprocess
                                .bind_group_layout,
                        ),
                        &BindGroupEntries::with_indices((
                            (3, self.current_input_buffer.as_entire_binding()),
                            (4, self.previous_input_buffer.as_entire_binding()),
                            (
                                5,
                                BindingResource::Buffer(BufferBinding {
                                    buffer: indexed_work_item_gpu_buffer,
                                    offset: 0,
                                    size: indexed_work_item_buffer_size,
                                }),
                            ),
                            (6, self.data_buffer.as_entire_binding()),
                            (7, indexed_cpu_metadata_buffer.as_entire_binding()),
                            (8, indexed_gpu_metadata_buffer.as_entire_binding()),
                            (9, mesh_culling_data_buffer.as_entire_binding()),
                            (0, view_uniforms_binding.clone()),
                            (10, &view_depth_pyramid.all_mips),
                            (
                                2,
                                BufferBinding {
                                    buffer: previous_view_buffer,
                                    offset: previous_view_uniform_offset.offset as u64,
                                    size: NonZeroU64::new(size_of::<PreviousViewData>() as u64),
                                },
                            ),
                            (
                                11,
                                BufferBinding {
                                    buffer: late_indexed_work_item_gpu_buffer,
                                    offset: 0,
                                    size: indexed_work_item_buffer_size,
                                },
                            ),
                            (
                                12,
                                BufferBinding {
                                    buffer: late_indexed_indirect_parameters_buffer,
                                    offset: 0,
                                    size: NonZeroU64::new(
                                        late_indexed_indirect_parameters_buffer.size(),
                                    ),
                                },
                            ),
                        )),
                    ),
                )
            }
            _ => None,
        }
    }

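    /// Creates the bind group for the early phase of GPU occlusion culling
    /// preprocessing, as applied to non-indexed meshes.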
    fn create_indirect_occlusion_culling_early_non_indexed_bind_group(
        &self,
        view_depth_pyramid: &ViewDepthPyramid,
        previous_view_uniform_offset: &PreviousViewUniformOffset,
        non_indexed_work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
        late_non_indexed_work_item_buffer: &UninitBufferVec<PreprocessWorkItem>,
    ) -> Option<BindGroup> {
        let mesh_culling_data_buffer = self.mesh_culling_data_buffer.buffer()?;
        let view_uniforms_binding = self.view_uniforms.uniforms.binding()?;
        let previous_view_buffer = self.previous_view_uniforms.uniforms.buffer()?;

        match (
            self.phase_indirect_parameters_buffers
                .non_indexed
                .cpu_metadata_buffer(),
            self.phase_indirect_parameters_buffers
                .non_indexed
                .gpu_metadata_buffer(),
            non_indexed_work_item_buffer.buffer(),
            late_non_indexed_work_item_buffer.buffer(),
            self.late_non_indexed_indirect_parameters_buffer.buffer(),
        ) {
            (
                Some(non_indexed_cpu_metadata_buffer),
                Some(non_indexed_gpu_metadata_buffer),
                Some(non_indexed_work_item_gpu_buffer),
                Some(late_non_indexed_work_item_gpu_buffer),
                Some(late_non_indexed_indirect_parameters_buffer),
            ) => {
                let non_indexed_work_item_buffer_size = NonZero::<u64>::try_from(
                    non_indexed_work_item_buffer.len() as u64
                        * u64::from(PreprocessWorkItem::min_size()),
                )
                .ok();

                Some(
                    self.render_device.create_bind_group(
                        "preprocess_early_non_indexed_gpu_occlusion_culling_bind_group",
                        &self.pipeline_cache.get_bind_group_layout(
                            &self
                                .pipelines
                                .early_gpu_occlusion_culling_preprocess
                                .bind_group_layout,
                        ),
                        &BindGroupEntries::with_indices((
                            (3, self.current_input_buffer.as_entire_binding()),
                            (4, self.previous_input_buffer.as_entire_binding()),
                            (
                                5,
                                BindingResource::Buffer(BufferBinding {
                                    buffer: non_indexed_work_item_gpu_buffer,
                                    offset: 0,
                                    size: non_indexed_work_item_buffer_size,
                                }),
                            ),
                            (6, self.data_buffer.as_entire_binding()),
                            (7, non_indexed_cpu_metadata_buffer.as_entire_binding()),
                            (8, non_indexed_gpu_metadata_buffer.as_entire_binding()),
                            (9, mesh_culling_data_buffer.as_entire_binding()),
                            (0, view_uniforms_binding.clone()),
                            (10, &view_depth_pyramid.all_mips),
                            (
                                2,
                                BufferBinding {
                                    buffer: previous_view_buffer,
                                    offset: previous_view_uniform_offset.offset as u64,
                                    size: NonZeroU64::new(size_of::<PreviousViewData>() as u64),
                                },
                            ),
                            (
                                11,
                                BufferBinding {
                                    buffer: late_non_indexed_work_item_gpu_buffer,
                                    offset: 0,
                                    size: non_indexed_work_item_buffer_size,
                                },
                            ),
                            (
                                12,
                                BufferBinding {
                                    buffer: late_non_indexed_indirect_parameters_buffer,
                                    offset: 0,
                                    size: NonZeroU64::new(
                                        late_non_indexed_indirect_parameters_buffer.size(),
                                    ),
                                },
                            ),
                        )),
                    ),
                )
            }
            _ => None,
        }
    }

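    /// Creates the bind group for the late phase of GPU occlusion culling
    /// preprocessing, as applied to indexed meshes.
    ///
    /// The late phase reads only the late work item buffer, which the early
    /// phase populates on the GPU (note the binding at index 11 above).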
    fn create_indirect_occlusion_culling_late_indexed_bind_group(
        &self,
        view_depth_pyramid: &ViewDepthPyramid,
        previous_view_uniform_offset: &PreviousViewUniformOffset,
        late_indexed_work_item_buffer: &UninitBufferVec<PreprocessWorkItem>,
    ) -> Option<BindGroup> {
        let mesh_culling_data_buffer = self.mesh_culling_data_buffer.buffer()?;
        let view_uniforms_binding = self.view_uniforms.uniforms.binding()?;
        let previous_view_buffer = self.previous_view_uniforms.uniforms.buffer()?;

        match (
            self.phase_indirect_parameters_buffers
                .indexed
                .cpu_metadata_buffer(),
            self.phase_indirect_parameters_buffers
                .indexed
                .gpu_metadata_buffer(),
            late_indexed_work_item_buffer.buffer(),
            self.late_indexed_indirect_parameters_buffer.buffer(),
        ) {
            (
                Some(indexed_cpu_metadata_buffer),
                Some(indexed_gpu_metadata_buffer),
                Some(late_indexed_work_item_gpu_buffer),
                Some(late_indexed_indirect_parameters_buffer),
            ) => {
                let late_indexed_work_item_buffer_size = NonZero::<u64>::try_from(
                    late_indexed_work_item_buffer.len() as u64
                        * u64::from(PreprocessWorkItem::min_size()),
                )
                .ok();

                Some(
                    self.render_device.create_bind_group(
                        "preprocess_late_indexed_gpu_occlusion_culling_bind_group",
                        &self.pipeline_cache.get_bind_group_layout(
                            &self
                                .pipelines
                                .late_gpu_occlusion_culling_preprocess
                                .bind_group_layout,
                        ),
                        &BindGroupEntries::with_indices((
                            (3, self.current_input_buffer.as_entire_binding()),
                            (4, self.previous_input_buffer.as_entire_binding()),
                            (
                                5,
                                BindingResource::Buffer(BufferBinding {
                                    buffer: late_indexed_work_item_gpu_buffer,
                                    offset: 0,
                                    size: late_indexed_work_item_buffer_size,
                                }),
                            ),
                            (6, self.data_buffer.as_entire_binding()),
                            (7, indexed_cpu_metadata_buffer.as_entire_binding()),
                            (8, indexed_gpu_metadata_buffer.as_entire_binding()),
                            (9, mesh_culling_data_buffer.as_entire_binding()),
                            (0, view_uniforms_binding.clone()),
                            (10, &view_depth_pyramid.all_mips),
                            (
                                2,
                                BufferBinding {
                                    buffer: previous_view_buffer,
                                    offset: previous_view_uniform_offset.offset as u64,
                                    size: NonZeroU64::new(size_of::<PreviousViewData>() as u64),
                                },
                            ),
                            (
                                12,
                                BufferBinding {
                                    buffer: late_indexed_indirect_parameters_buffer,
                                    offset: 0,
                                    size: NonZeroU64::new(
                                        late_indexed_indirect_parameters_buffer.size(),
                                    ),
                                },
                            ),
                        )),
                    ),
                )
            }
            _ => None,
        }
    }

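    /// Creates the bind group for the late phase of GPU occlusion culling
    /// preprocessing, as applied to non-indexed meshes.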
    fn create_indirect_occlusion_culling_late_non_indexed_bind_group(
        &self,
        view_depth_pyramid: &ViewDepthPyramid,
        previous_view_uniform_offset: &PreviousViewUniformOffset,
        late_non_indexed_work_item_buffer: &UninitBufferVec<PreprocessWorkItem>,
    ) -> Option<BindGroup> {
        let mesh_culling_data_buffer = self.mesh_culling_data_buffer.buffer()?;
        let view_uniforms_binding = self.view_uniforms.uniforms.binding()?;
        let previous_view_buffer = self.previous_view_uniforms.uniforms.buffer()?;

        match (
            self.phase_indirect_parameters_buffers
                .non_indexed
                .cpu_metadata_buffer(),
            self.phase_indirect_parameters_buffers
                .non_indexed
                .gpu_metadata_buffer(),
            late_non_indexed_work_item_buffer.buffer(),
            self.late_non_indexed_indirect_parameters_buffer.buffer(),
        ) {
            (
                Some(non_indexed_cpu_metadata_buffer),
                Some(non_indexed_gpu_metadata_buffer),
                Some(late_non_indexed_work_item_gpu_buffer),
                Some(late_non_indexed_indirect_parameters_buffer),
            ) => {
                let late_non_indexed_work_item_buffer_size = NonZero::<u64>::try_from(
                    late_non_indexed_work_item_buffer.len() as u64
                        * u64::from(PreprocessWorkItem::min_size()),
                )
                .ok();

                Some(
                    self.render_device.create_bind_group(
                        "preprocess_late_non_indexed_gpu_occlusion_culling_bind_group",
                        &self.pipeline_cache.get_bind_group_layout(
                            &self
                                .pipelines
                                .late_gpu_occlusion_culling_preprocess
                                .bind_group_layout,
                        ),
                        &BindGroupEntries::with_indices((
                            (3, self.current_input_buffer.as_entire_binding()),
                            (4, self.previous_input_buffer.as_entire_binding()),
                            (
                                5,
                                BindingResource::Buffer(BufferBinding {
                                    buffer: late_non_indexed_work_item_gpu_buffer,
                                    offset: 0,
                                    size: late_non_indexed_work_item_buffer_size,
                                }),
                            ),
                            (6, self.data_buffer.as_entire_binding()),
                            (7, non_indexed_cpu_metadata_buffer.as_entire_binding()),
                            (8, non_indexed_gpu_metadata_buffer.as_entire_binding()),
                            (9, mesh_culling_data_buffer.as_entire_binding()),
                            (0, view_uniforms_binding.clone()),
                            (10, &view_depth_pyramid.all_mips),
                            (
                                2,
                                BufferBinding {
                                    buffer: previous_view_buffer,
                                    offset: previous_view_uniform_offset.offset as u64,
                                    size: NonZeroU64::new(size_of::<PreviousViewData>() as u64),
                                },
                            ),
                            (
                                12,
                                BufferBinding {
                                    buffer: late_non_indexed_indirect_parameters_buffer,
                                    offset: 0,
                                    size: NonZeroU64::new(
                                        late_non_indexed_indirect_parameters_buffer.size(),
                                    ),
                                },
                            ),
                        )),
                    ),
                )
            }
            _ => None,
        }
    }

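    /// Creates the bind groups used when indirect drawing is enabled but
    /// occlusion culling is disabled, so that meshes are only frustum culled.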
    fn create_indirect_frustum_culling_preprocess_bind_groups(
        &self,
        indexed_work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
        non_indexed_work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
    ) -> Option<PhasePreprocessBindGroups> {
        Some(PhasePreprocessBindGroups::IndirectFrustumCulling {
            indexed: self
                .create_indirect_frustum_culling_indexed_bind_group(indexed_work_item_buffer),
            non_indexed: self.create_indirect_frustum_culling_non_indexed_bind_group(
                non_indexed_work_item_buffer,
            ),
        })
    }

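    /// Creates the bind group for the GPU frustum culling preprocessing pass,
    /// as applied to indexed meshes.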
    fn create_indirect_frustum_culling_indexed_bind_group(
        &self,
        indexed_work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
    ) -> Option<BindGroup> {
        let mesh_culling_data_buffer = self.mesh_culling_data_buffer.buffer()?;
        let view_uniforms_binding = self.view_uniforms.uniforms.binding()?;

        match (
            self.phase_indirect_parameters_buffers
                .indexed
                .cpu_metadata_buffer(),
            self.phase_indirect_parameters_buffers
                .indexed
                .gpu_metadata_buffer(),
            indexed_work_item_buffer.buffer(),
        ) {
            (
                Some(indexed_cpu_metadata_buffer),
                Some(indexed_gpu_metadata_buffer),
                Some(indexed_work_item_gpu_buffer),
            ) => {
                let indexed_work_item_buffer_size = NonZero::<u64>::try_from(
                    indexed_work_item_buffer.len() as u64
                        * u64::from(PreprocessWorkItem::min_size()),
                )
                .ok();

                Some(
                    self.render_device.create_bind_group(
                        "preprocess_gpu_indexed_frustum_culling_bind_group",
                        &self.pipeline_cache.get_bind_group_layout(
                            &self
                                .pipelines
                                .gpu_frustum_culling_preprocess
                                .bind_group_layout,
                        ),
                        &BindGroupEntries::with_indices((
                            (3, self.current_input_buffer.as_entire_binding()),
                            (4, self.previous_input_buffer.as_entire_binding()),
                            (
                                5,
                                BindingResource::Buffer(BufferBinding {
                                    buffer: indexed_work_item_gpu_buffer,
                                    offset: 0,
                                    size: indexed_work_item_buffer_size,
                                }),
                            ),
                            (6, self.data_buffer.as_entire_binding()),
                            (7, indexed_cpu_metadata_buffer.as_entire_binding()),
                            (8, indexed_gpu_metadata_buffer.as_entire_binding()),
                            (9, mesh_culling_data_buffer.as_entire_binding()),
                            (0, view_uniforms_binding.clone()),
                        )),
                    ),
                )
            }
            _ => None,
        }
    }

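    /// Creates the bind group for the GPU frustum culling preprocessing pass,
    /// as applied to non-indexed meshes.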
    fn create_indirect_frustum_culling_non_indexed_bind_group(
        &self,
        non_indexed_work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
    ) -> Option<BindGroup> {
        let mesh_culling_data_buffer = self.mesh_culling_data_buffer.buffer()?;
        let view_uniforms_binding = self.view_uniforms.uniforms.binding()?;

        match (
            self.phase_indirect_parameters_buffers
                .non_indexed
                .cpu_metadata_buffer(),
            self.phase_indirect_parameters_buffers
                .non_indexed
                .gpu_metadata_buffer(),
            non_indexed_work_item_buffer.buffer(),
        ) {
            (
                Some(non_indexed_cpu_metadata_buffer),
                Some(non_indexed_gpu_metadata_buffer),
                Some(non_indexed_work_item_gpu_buffer),
            ) => {
                let non_indexed_work_item_buffer_size = NonZero::<u64>::try_from(
                    non_indexed_work_item_buffer.len() as u64
                        * u64::from(PreprocessWorkItem::min_size()),
                )
                .ok();

                Some(
                    self.render_device.create_bind_group(
                        "preprocess_gpu_non_indexed_frustum_culling_bind_group",
                        &self.pipeline_cache.get_bind_group_layout(
                            &self
                                .pipelines
                                .gpu_frustum_culling_preprocess
                                .bind_group_layout,
                        ),
                        &BindGroupEntries::with_indices((
                            (3, self.current_input_buffer.as_entire_binding()),
                            (4, self.previous_input_buffer.as_entire_binding()),
                            (
                                5,
                                BindingResource::Buffer(BufferBinding {
                                    buffer: non_indexed_work_item_gpu_buffer,
                                    offset: 0,
                                    size: non_indexed_work_item_buffer_size,
                                }),
                            ),
                            (6, self.data_buffer.as_entire_binding()),
                            (7, non_indexed_cpu_metadata_buffer.as_entire_binding()),
                            (8, non_indexed_gpu_metadata_buffer.as_entire_binding()),
                            (9, mesh_culling_data_buffer.as_entire_binding()),
                            (0, view_uniforms_binding.clone()),
                        )),
                    ),
                )
            }
            _ => None,
        }
    }
}

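/// Creates the bind groups for the compute passes that reset the indirect
/// batch set counters and that build the indirect draw parameters, and
/// inserts them into the render world as the
/// [`BuildIndirectParametersBindGroups`] resource.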
fn create_build_indirect_parameters_bind_groups(
    commands: &mut Commands,
    render_device: &RenderDevice,
    pipeline_cache: &PipelineCache,
    pipelines: &PreprocessPipelines,
    current_input_buffer: &Buffer,
    indirect_parameters_buffers: &IndirectParametersBuffers,
) {
    let mut build_indirect_parameters_bind_groups = BuildIndirectParametersBindGroups::new();

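    // Every phase has its own set of indirect parameters buffers, so build a
    // full set of bind groups for each phase present this frame.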
    for (phase_type_id, phase_indirect_parameters_buffer) in indirect_parameters_buffers.iter() {
        build_indirect_parameters_bind_groups.insert(
            *phase_type_id,
            PhaseBuildIndirectParametersBindGroups {
                reset_indexed_indirect_batch_sets: match (phase_indirect_parameters_buffer
                    .indexed
                    .batch_sets_buffer(),)
                {
                    (Some(indexed_batch_sets_buffer),) => Some(
                        render_device.create_bind_group(
                            "reset_indexed_indirect_batch_sets_bind_group",
                            &pipeline_cache.get_bind_group_layout(
                                &pipelines
                                    .early_phase
                                    .reset_indirect_batch_sets
                                    .bind_group_layout,
                            ),
                            &BindGroupEntries::sequential((
                                indexed_batch_sets_buffer.as_entire_binding(),
                            )),
                        ),
                    ),
                    _ => None,
                },

                reset_non_indexed_indirect_batch_sets: match (phase_indirect_parameters_buffer
                    .non_indexed
                    .batch_sets_buffer(),)
                {
                    (Some(non_indexed_batch_sets_buffer),) => Some(
                        render_device.create_bind_group(
                            "reset_non_indexed_indirect_batch_sets_bind_group",
                            &pipeline_cache.get_bind_group_layout(
                                &pipelines
                                    .early_phase
                                    .reset_indirect_batch_sets
                                    .bind_group_layout,
                            ),
                            &BindGroupEntries::sequential((
                                non_indexed_batch_sets_buffer.as_entire_binding(),
                            )),
                        ),
                    ),
                    _ => None,
                },

                build_indexed_indirect: match (
                    phase_indirect_parameters_buffer
                        .indexed
                        .cpu_metadata_buffer(),
                    phase_indirect_parameters_buffer
                        .indexed
                        .gpu_metadata_buffer(),
                    phase_indirect_parameters_buffer.indexed.data_buffer(),
                    phase_indirect_parameters_buffer.indexed.batch_sets_buffer(),
                ) {
                    (
                        Some(indexed_indirect_parameters_cpu_metadata_buffer),
                        Some(indexed_indirect_parameters_gpu_metadata_buffer),
                        Some(indexed_indirect_parameters_data_buffer),
                        Some(indexed_batch_sets_buffer),
                    ) => Some(
                        render_device.create_bind_group(
                            "build_indexed_indirect_parameters_bind_group",
                            &pipeline_cache.get_bind_group_layout(
                                &pipelines
                                    .gpu_frustum_culling_build_indexed_indirect_params
                                    .bind_group_layout,
                            ),
                            &BindGroupEntries::sequential((
                                current_input_buffer.as_entire_binding(),
                                BufferBinding {
                                    buffer: indexed_indirect_parameters_cpu_metadata_buffer,
                                    offset: 0,
                                    size: NonZeroU64::new(
                                        phase_indirect_parameters_buffer.indexed.batch_count()
                                            as u64
                                            * size_of::<IndirectParametersCpuMetadata>() as u64,
                                    ),
                                },
                                BufferBinding {
                                    buffer: indexed_indirect_parameters_gpu_metadata_buffer,
                                    offset: 0,
                                    size: NonZeroU64::new(
                                        phase_indirect_parameters_buffer.indexed.batch_count()
                                            as u64
                                            * size_of::<IndirectParametersGpuMetadata>() as u64,
                                    ),
                                },
                                indexed_batch_sets_buffer.as_entire_binding(),
                                indexed_indirect_parameters_data_buffer.as_entire_binding(),
                            )),
                        ),
                    ),
                    _ => None,
                },

                build_non_indexed_indirect: match (
                    phase_indirect_parameters_buffer
                        .non_indexed
                        .cpu_metadata_buffer(),
                    phase_indirect_parameters_buffer
                        .non_indexed
                        .gpu_metadata_buffer(),
                    phase_indirect_parameters_buffer.non_indexed.data_buffer(),
                    phase_indirect_parameters_buffer
                        .non_indexed
                        .batch_sets_buffer(),
                ) {
                    (
                        Some(non_indexed_indirect_parameters_cpu_metadata_buffer),
                        Some(non_indexed_indirect_parameters_gpu_metadata_buffer),
                        Some(non_indexed_indirect_parameters_data_buffer),
                        Some(non_indexed_batch_sets_buffer),
                    ) => Some(
                        render_device.create_bind_group(
                            "build_non_indexed_indirect_parameters_bind_group",
                            &pipeline_cache.get_bind_group_layout(
                                &pipelines
                                    .gpu_frustum_culling_build_non_indexed_indirect_params
                                    .bind_group_layout,
                            ),
                            &BindGroupEntries::sequential((
                                current_input_buffer.as_entire_binding(),
                                BufferBinding {
                                    buffer: non_indexed_indirect_parameters_cpu_metadata_buffer,
                                    offset: 0,
                                    size: NonZeroU64::new(
                                        phase_indirect_parameters_buffer.non_indexed.batch_count()
                                            as u64
                                            * size_of::<IndirectParametersCpuMetadata>() as u64,
                                    ),
                                },
                                BufferBinding {
                                    buffer: non_indexed_indirect_parameters_gpu_metadata_buffer,
                                    offset: 0,
                                    size: NonZeroU64::new(
                                        phase_indirect_parameters_buffer.non_indexed.batch_count()
                                            as u64
                                            * size_of::<IndirectParametersGpuMetadata>() as u64,
                                    ),
                                },
                                non_indexed_batch_sets_buffer.as_entire_binding(),
                                non_indexed_indirect_parameters_data_buffer.as_entire_binding(),
                            )),
                        ),
                    ),
                    _ => None,
                },
            },
        );
    }

    commands.insert_resource(build_indirect_parameters_bind_groups);
}

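/// A system that uploads the contents of [`MeshCullingDataBuffer`] to the GPU
/// so that the preprocessing shaders can read the per-mesh culling data.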
pub fn write_mesh_culling_data_buffer(
    render_device: Res<RenderDevice>,
    render_queue: Res<RenderQueue>,
    mut mesh_culling_data_buffer: ResMut<MeshCullingDataBuffer>,
) {
    mesh_culling_data_buffer.write_buffer(&render_device, &render_queue);
}