//! GPU mesh preprocessing.
//!
//! This optional compute pass expands the compact [`MeshInputUniform`]s that
//! the CPU uploads into the full [`MeshUniform`]s that the rest of the
//! renderer consumes, and can also perform GPU frustum and occlusion culling.

use core::num::{NonZero, NonZeroU64};

use bevy_app::{App, Plugin};
use bevy_asset::{load_internal_asset, weak_handle, Handle};
use bevy_core_pipeline::{
    core_3d::graph::{Core3d, Node3d},
    experimental::mip_generation::ViewDepthPyramid,
    prepass::{DepthPrepass, PreviousViewData, PreviousViewUniformOffset, PreviousViewUniforms},
};
use bevy_derive::{Deref, DerefMut};
use bevy_ecs::{
    component::Component,
    entity::Entity,
    prelude::resource_exists,
    query::{Has, Or, QueryState, With, Without},
    resource::Resource,
    schedule::IntoScheduleConfigs as _,
    system::{lifetimeless::Read, Commands, Query, Res, ResMut},
    world::{FromWorld, World},
};
use bevy_render::batching::gpu_preprocessing::{
    GpuPreprocessingMode, IndirectParametersGpuMetadata, UntypedPhaseIndirectParametersBuffers,
};
use bevy_render::{
    batching::gpu_preprocessing::{
        BatchedInstanceBuffers, GpuOcclusionCullingWorkItemBuffers, GpuPreprocessingSupport,
        IndirectBatchSet, IndirectParametersBuffers, IndirectParametersCpuMetadata,
        IndirectParametersIndexed, IndirectParametersNonIndexed,
        LatePreprocessWorkItemIndirectParameters, PreprocessWorkItem, PreprocessWorkItemBuffers,
        UntypedPhaseBatchedInstanceBuffers,
    },
    experimental::occlusion_culling::OcclusionCulling,
    render_graph::{Node, NodeRunError, RenderGraphApp, RenderGraphContext},
    render_resource::{
        binding_types::{storage_buffer, storage_buffer_read_only, texture_2d, uniform_buffer},
        BindGroup, BindGroupEntries, BindGroupLayout, BindingResource, Buffer, BufferBinding,
        CachedComputePipelineId, ComputePassDescriptor, ComputePipelineDescriptor,
        DynamicBindGroupLayoutEntries, PipelineCache, PushConstantRange, RawBufferVec, Shader,
        ShaderStages, ShaderType, SpecializedComputePipeline, SpecializedComputePipelines,
        TextureSampleType, UninitBufferVec,
    },
    renderer::{RenderContext, RenderDevice, RenderQueue},
    settings::WgpuFeatures,
    view::{ExtractedView, NoIndirectDrawing, ViewUniform, ViewUniformOffset, ViewUniforms},
    Render, RenderApp, RenderSet,
};
use bevy_utils::TypeIdMap;
use bitflags::bitflags;
use smallvec::{smallvec, SmallVec};
use tracing::warn;

use crate::{
    graph::NodePbr, MeshCullingData, MeshCullingDataBuffer, MeshInputUniform, MeshUniform,
};

use super::{ShadowView, ViewLightEntities};

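/// The handle to the `mesh_preprocess.wgsl` compute shader.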
pub const MESH_PREPROCESS_SHADER_HANDLE: Handle<Shader> =
    weak_handle!("c8579292-cf92-43b5-9c5a-ec5bd4e44d12");
/// The handle to the `reset_indirect_batch_sets.wgsl` compute shader.
pub const RESET_INDIRECT_BATCH_SETS_SHADER_HANDLE: Handle<Shader> =
    weak_handle!("045fb176-58e2-4e76-b241-7688d761bb23");
/// The handle to the `build_indirect_params.wgsl` compute shader.
pub const BUILD_INDIRECT_PARAMS_SHADER_HANDLE: Handle<Shader> =
    weak_handle!("133b01f0-3eaf-4590-9ee9-f0cf91a00b71");

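/// The workgroup size that the dispatch math below assumes; it matches the
/// workgroup size declared in the compute shaders.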
const WORKGROUP_SIZE: usize = 64;

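/// A plugin that builds mesh uniforms on GPU.
///
/// This plugin does nothing unless GPU preprocessing is supported and the GPU
/// instance buffer builder is enabled.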
pub struct GpuMeshPreprocessPlugin {
    /// Whether mesh uniforms are to be built on GPU.
    ///
    /// This requires compute shader support.
    pub use_gpu_instance_buffer_builder: bool,
}

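/// The render node that zeroes out the GPU-populated indirect parameters
/// metadata buffers before mesh preprocessing runs.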
#[derive(Default)]
pub struct ClearIndirectParametersMetadataNode;

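/// The render node for the early mesh preprocessing pass.
///
/// This pass transforms [`MeshInputUniform`]s into [`MeshUniform`]s for the
/// main view and its shadow views, performing GPU frustum culling and, where
/// enabled, the early phase of two-phase occlusion culling.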
pub struct EarlyGpuPreprocessNode {
    view_query: QueryState<
        (
            Read<ExtractedView>,
            Option<Read<PreprocessBindGroups>>,
            Option<Read<ViewUniformOffset>>,
            Has<NoIndirectDrawing>,
            Has<OcclusionCulling>,
        ),
        Without<SkipGpuPreprocess>,
    >,
    main_view_query: QueryState<Read<ViewLightEntities>>,
}

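/// The render node for the late mesh preprocessing pass.
///
/// This pass runs after the depth pyramid has been built and re-tests, against
/// the updated pyramid, the meshes that the early occlusion culling phase
/// rejected. It only applies to views with both a depth prepass and occlusion
/// culling enabled.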
pub struct LateGpuPreprocessNode {
    view_query: QueryState<
        (
            Read<ExtractedView>,
            Read<PreprocessBindGroups>,
            Read<ViewUniformOffset>,
        ),
        (
            Without<SkipGpuPreprocess>,
            Without<NoIndirectDrawing>,
            With<OcclusionCulling>,
            With<DepthPrepass>,
        ),
    >,
}

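/// The render node that builds indirect draw parameters for the early
/// prepass, for views that draw indirectly and have either a depth prepass or
/// a shadow view.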
pub struct EarlyPrepassBuildIndirectParametersNode {
    view_query: QueryState<
        Read<PreprocessBindGroups>,
        (
            Without<SkipGpuPreprocess>,
            Without<NoIndirectDrawing>,
            Or<(With<DepthPrepass>, With<ShadowView>)>,
        ),
    >,
}

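/// The render node that builds indirect draw parameters for the late prepass,
/// for views that additionally have occlusion culling enabled.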
pub struct LatePrepassBuildIndirectParametersNode {
    view_query: QueryState<
        Read<PreprocessBindGroups>,
        (
            Without<SkipGpuPreprocess>,
            Without<NoIndirectDrawing>,
            Or<(With<DepthPrepass>, With<ShadowView>)>,
            With<OcclusionCulling>,
        ),
    >,
}

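/// The render node that builds indirect draw parameters for the main pass,
/// for all views that draw indirectly.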
pub struct MainBuildIndirectParametersNode {
    view_query: QueryState<
        Read<PreprocessBindGroups>,
        (Without<SkipGpuPreprocess>, Without<NoIndirectDrawing>),
    >,
}

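/// The compute pipelines for mesh preprocessing and indirect parameter
/// building, in all their variants.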
#[derive(Resource)]
pub struct PreprocessPipelines {
    /// The pipeline used when indirect drawing is disabled; it transforms
    /// meshes but performs no culling.
    pub direct_preprocess: PreprocessPipeline,
    /// The pipeline used when GPU frustum culling is in use, but occlusion
    /// culling isn't.
    pub gpu_frustum_culling_preprocess: PreprocessPipeline,
    /// The pipeline used for the early phase of GPU occlusion culling.
    pub early_gpu_occlusion_culling_preprocess: PreprocessPipeline,
    /// The pipeline used for the late phase of GPU occlusion culling.
    pub late_gpu_occlusion_culling_preprocess: PreprocessPipeline,
    /// The pipeline that builds indirect draw parameters for indexed meshes
    /// when GPU frustum culling is in use.
    pub gpu_frustum_culling_build_indexed_indirect_params: BuildIndirectParametersPipeline,
    /// The pipeline that builds indirect draw parameters for non-indexed
    /// meshes when GPU frustum culling is in use.
    pub gpu_frustum_culling_build_non_indexed_indirect_params: BuildIndirectParametersPipeline,
    /// The phase-specific pipelines for the early phase.
    pub early_phase: PreprocessPhasePipelines,
    /// The phase-specific pipelines for the late phase.
    pub late_phase: PreprocessPhasePipelines,
    /// The phase-specific pipelines for the main phase.
    pub main_phase: PreprocessPhasePipelines,
}

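/// The pipelines for building indirect draw parameters that are specific to
/// one phase of the frame (early, late, or main).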
#[derive(Clone)]
pub struct PreprocessPhasePipelines {
    /// The pipeline that resets the indirect batch set counts.
    pub reset_indirect_batch_sets: ResetIndirectBatchSetsPipeline,
    /// The pipeline that builds indirect draw parameters for indexed meshes
    /// when occlusion culling is in use.
    pub gpu_occlusion_culling_build_indexed_indirect_params: BuildIndirectParametersPipeline,
    /// The pipeline that builds indirect draw parameters for non-indexed
    /// meshes when occlusion culling is in use.
    pub gpu_occlusion_culling_build_non_indexed_indirect_params: BuildIndirectParametersPipeline,
}

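/// The pipeline for one variant of the mesh preprocessing shader.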
pub struct PreprocessPipeline {
    /// The bind group layout for this pipeline variant.
    pub bind_group_layout: BindGroupLayout,
    /// The pipeline ID, if the pipeline has been specialized.
    pub pipeline_id: Option<CachedComputePipelineId>,
}

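/// The pipeline that resets the indirect batch set counts before the indirect
/// parameters are rebuilt.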
#[derive(Clone)]
pub struct ResetIndirectBatchSetsPipeline {
    /// The bind group layout for the shader.
    pub bind_group_layout: BindGroupLayout,
    /// The pipeline ID, if the pipeline has been specialized.
    pub pipeline_id: Option<CachedComputePipelineId>,
}

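/// The pipeline that converts the metadata that mesh preprocessing wrote into
/// actual indirect draw commands.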
#[derive(Clone)]
pub struct BuildIndirectParametersPipeline {
    /// The bind group layout for the shader.
    pub bind_group_layout: BindGroupLayout,
    /// The pipeline ID, if the pipeline has been specialized.
    pub pipeline_id: Option<CachedComputePipelineId>,
}

bitflags! {
    /// Specialization flags for the mesh preprocessing shader.
    #[derive(Clone, Copy, PartialEq, Eq, Hash)]
    pub struct PreprocessPipelineKey: u8 {
        /// Whether GPU frustum culling is in use.
        const FRUSTUM_CULLING = 1;
        /// Whether GPU two-phase occlusion culling is in use.
        const OCCLUSION_CULLING = 2;
        /// Whether this is the early phase of GPU two-phase occlusion
        /// culling.
        const EARLY_PHASE = 4;
    }

    /// Specialization flags for the indirect-parameters building shader.
    #[derive(Clone, Copy, PartialEq, Eq, Hash)]
    pub struct BuildIndirectParametersPipelineKey: u8 {
        /// Whether the shader is processing indexed meshes (i.e. meshes that
        /// have an index buffer).
        const INDEXED = 1;
        /// Whether the GPU and driver support `multi_draw_indirect_count`.
        const MULTI_DRAW_INDIRECT_COUNT_SUPPORTED = 2;
        /// Whether GPU two-phase occlusion culling is in use.
        const OCCLUSION_CULLING = 4;
        /// Whether this shader builds indirect parameters for the early
        /// phase.
        const EARLY_PHASE = 8;
        /// Whether this shader builds indirect parameters for the late phase.
        const LATE_PHASE = 16;
        /// Whether this shader builds indirect parameters for the main phase.
        const MAIN_PHASE = 32;
    }
}

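/// The bind groups for the mesh preprocessing shader, keyed by phase type ID.
///
/// This component is attached to each view in the render world.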
#[derive(Component, Clone, Deref, DerefMut)]
pub struct PreprocessBindGroups(pub TypeIdMap<PhasePreprocessBindGroups>);

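/// The bind groups for the mesh preprocessing shader for a single render
/// phase, one variant per mode the shader can run in.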
#[derive(Clone)]
pub enum PhasePreprocessBindGroups {
    /// The single bind group used when drawing directly, without GPU culling.
    Direct(BindGroup),

    /// The bind groups used when GPU frustum culling, but not occlusion
    /// culling, is in use.
    IndirectFrustumCulling {
        /// The bind group for indexed meshes, if there are any.
        indexed: Option<BindGroup>,
        /// The bind group for non-indexed meshes, if there are any.
        non_indexed: Option<BindGroup>,
    },

    /// The bind groups used when GPU two-phase occlusion culling is in use.
    IndirectOcclusionCulling {
        /// The early-phase bind group for indexed meshes, if there are any.
        early_indexed: Option<BindGroup>,
        /// The early-phase bind group for non-indexed meshes, if there are
        /// any.
        early_non_indexed: Option<BindGroup>,
        /// The late-phase bind group for indexed meshes, if there are any.
        late_indexed: Option<BindGroup>,
        /// The late-phase bind group for non-indexed meshes, if there are
        /// any.
        late_non_indexed: Option<BindGroup>,
    },
}

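/// The bind groups for the indirect-parameters building shaders, keyed by
/// phase type ID.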
#[derive(Resource, Default, Deref, DerefMut)]
pub struct BuildIndirectParametersBindGroups(pub TypeIdMap<PhaseBuildIndirectParametersBindGroups>);

impl BuildIndirectParametersBindGroups {
    /// Creates a new, empty [`BuildIndirectParametersBindGroups`] table.
    pub fn new() -> BuildIndirectParametersBindGroups {
        Self::default()
    }
}

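/// The bind groups for the shaders that reset indirect batch sets and build
/// indirect draw parameters for a single phase.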
pub struct PhaseBuildIndirectParametersBindGroups {
    /// The bind group that resets the batch set counts for indexed meshes.
    reset_indexed_indirect_batch_sets: Option<BindGroup>,
    /// The bind group that resets the batch set counts for non-indexed
    /// meshes.
    reset_non_indexed_indirect_batch_sets: Option<BindGroup>,
    /// The bind group that builds indirect draw parameters for indexed
    /// meshes.
    build_indexed_indirect: Option<BindGroup>,
    /// The bind group that builds indirect draw parameters for non-indexed
    /// meshes.
    build_non_indexed_indirect: Option<BindGroup>,
}

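/// A marker component for views that should skip GPU mesh preprocessing.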
#[derive(Component, Default)]
pub struct SkipGpuPreprocess;

impl Plugin for GpuMeshPreprocessPlugin {
    fn build(&self, app: &mut App) {
        load_internal_asset!(
            app,
            MESH_PREPROCESS_SHADER_HANDLE,
            "mesh_preprocess.wgsl",
            Shader::from_wgsl
        );
        load_internal_asset!(
            app,
            RESET_INDIRECT_BATCH_SETS_SHADER_HANDLE,
            "reset_indirect_batch_sets.wgsl",
            Shader::from_wgsl
        );
        load_internal_asset!(
            app,
            BUILD_INDIRECT_PARAMS_SHADER_HANDLE,
            "build_indirect_params.wgsl",
            Shader::from_wgsl
        );
    }

    fn finish(&self, app: &mut App) {
        let Some(render_app) = app.get_sub_app_mut(RenderApp) else {
            return;
        };

        // This plugin does nothing if GPU instance buffer building isn't in
        // use or isn't supported on this platform.
        let gpu_preprocessing_support = render_app.world().resource::<GpuPreprocessingSupport>();
        if !self.use_gpu_instance_buffer_builder || !gpu_preprocessing_support.is_available() {
            return;
        }

        render_app
            .init_resource::<PreprocessPipelines>()
            .init_resource::<SpecializedComputePipelines<PreprocessPipeline>>()
            .init_resource::<SpecializedComputePipelines<ResetIndirectBatchSetsPipeline>>()
            .init_resource::<SpecializedComputePipelines<BuildIndirectParametersPipeline>>()
            .add_systems(
                Render,
                (
                    prepare_preprocess_pipelines.in_set(RenderSet::Prepare),
                    prepare_preprocess_bind_groups
                        .run_if(resource_exists::<BatchedInstanceBuffers<
                            MeshUniform,
                            MeshInputUniform
                        >>)
                        .in_set(RenderSet::PrepareBindGroups),
                    write_mesh_culling_data_buffer.in_set(RenderSet::PrepareResourcesFlush),
                ),
            )
            .add_render_graph_node::<ClearIndirectParametersMetadataNode>(
                Core3d,
                NodePbr::ClearIndirectParametersMetadata,
            )
            .add_render_graph_node::<EarlyGpuPreprocessNode>(Core3d, NodePbr::EarlyGpuPreprocess)
            .add_render_graph_node::<LateGpuPreprocessNode>(Core3d, NodePbr::LateGpuPreprocess)
            .add_render_graph_node::<EarlyPrepassBuildIndirectParametersNode>(
                Core3d,
                NodePbr::EarlyPrepassBuildIndirectParameters,
            )
            .add_render_graph_node::<LatePrepassBuildIndirectParametersNode>(
                Core3d,
                NodePbr::LatePrepassBuildIndirectParameters,
            )
            .add_render_graph_node::<MainBuildIndirectParametersNode>(
                Core3d,
                NodePbr::MainBuildIndirectParameters,
            )
            .add_render_graph_edges(
                Core3d,
                (
                    NodePbr::ClearIndirectParametersMetadata,
                    NodePbr::EarlyGpuPreprocess,
                    NodePbr::EarlyPrepassBuildIndirectParameters,
                    Node3d::EarlyPrepass,
                    Node3d::EarlyDeferredPrepass,
                    Node3d::EarlyDownsampleDepth,
                    NodePbr::LateGpuPreprocess,
                    NodePbr::LatePrepassBuildIndirectParameters,
                    Node3d::LatePrepass,
                    Node3d::LateDeferredPrepass,
                    NodePbr::MainBuildIndirectParameters,
                    Node3d::StartMainPass,
                ),
            )
            .add_render_graph_edges(
                Core3d,
                (
                    NodePbr::EarlyPrepassBuildIndirectParameters,
                    NodePbr::EarlyShadowPass,
                    Node3d::EarlyDownsampleDepth,
                ),
            )
            .add_render_graph_edges(
                Core3d,
                (
                    NodePbr::LatePrepassBuildIndirectParameters,
                    NodePbr::LateShadowPass,
                    NodePbr::MainBuildIndirectParameters,
                ),
            );
    }
}

impl Node for ClearIndirectParametersMetadataNode {
    fn run<'w>(
        &self,
        _: &mut RenderGraphContext,
        render_context: &mut RenderContext<'w>,
        world: &'w World,
    ) -> Result<(), NodeRunError> {
        let Some(indirect_parameters_buffers) = world.get_resource::<IndirectParametersBuffers>()
        else {
            return Ok(());
        };

        // Clear out each indexed and non-indexed GPU-side metadata buffer.
        for phase_indirect_parameters_buffers in indirect_parameters_buffers.values() {
            if let Some(indexed_gpu_metadata_buffer) = phase_indirect_parameters_buffers
                .indexed
                .gpu_metadata_buffer()
            {
                render_context.command_encoder().clear_buffer(
                    indexed_gpu_metadata_buffer,
                    0,
                    Some(
                        phase_indirect_parameters_buffers.indexed.batch_count() as u64
                            * size_of::<IndirectParametersGpuMetadata>() as u64,
                    ),
                );
            }

            if let Some(non_indexed_gpu_metadata_buffer) = phase_indirect_parameters_buffers
                .non_indexed
                .gpu_metadata_buffer()
            {
                render_context.command_encoder().clear_buffer(
                    non_indexed_gpu_metadata_buffer,
                    0,
                    Some(
                        phase_indirect_parameters_buffers.non_indexed.batch_count() as u64
                            * size_of::<IndirectParametersGpuMetadata>() as u64,
                    ),
                );
            }
        }

        Ok(())
    }
}

impl FromWorld for EarlyGpuPreprocessNode {
    fn from_world(world: &mut World) -> Self {
        Self {
            view_query: QueryState::new(world),
            main_view_query: QueryState::new(world),
        }
    }
}

impl Node for EarlyGpuPreprocessNode {
    fn update(&mut self, world: &mut World) {
        self.view_query.update_archetypes(world);
        self.main_view_query.update_archetypes(world);
    }

    fn run<'w>(
        &self,
        graph: &mut RenderGraphContext,
        render_context: &mut RenderContext<'w>,
        world: &'w World,
    ) -> Result<(), NodeRunError> {
        // Grab the [`BatchedInstanceBuffers`].
        let batched_instance_buffers =
            world.resource::<BatchedInstanceBuffers<MeshUniform, MeshInputUniform>>();

        let pipeline_cache = world.resource::<PipelineCache>();
        let preprocess_pipelines = world.resource::<PreprocessPipelines>();

        let mut compute_pass =
            render_context
                .command_encoder()
                .begin_compute_pass(&ComputePassDescriptor {
                    label: Some("early mesh preprocessing"),
                    timestamp_writes: None,
                });

        // Process the main view along with any shadow views attached to it.
        let mut all_views: SmallVec<[_; 8]> = SmallVec::new();
        all_views.push(graph.view_entity());
        if let Ok(shadow_cascade_views) =
            self.main_view_query.get_manual(world, graph.view_entity())
        {
            all_views.extend(shadow_cascade_views.lights.iter().copied());
        }

        for view_entity in all_views {
            let Ok((
                view,
                bind_groups,
                view_uniform_offset,
                no_indirect_drawing,
                occlusion_culling,
            )) = self.view_query.get_manual(world, view_entity)
            else {
                continue;
            };

            let Some(bind_groups) = bind_groups else {
                continue;
            };
            let Some(view_uniform_offset) = view_uniform_offset else {
                continue;
            };

            // Select the right pipeline, depending on whether GPU culling is
            // in use.
            let maybe_pipeline_id = if no_indirect_drawing {
                preprocess_pipelines.direct_preprocess.pipeline_id
            } else if occlusion_culling {
                preprocess_pipelines
                    .early_gpu_occlusion_culling_preprocess
                    .pipeline_id
            } else {
                preprocess_pipelines
                    .gpu_frustum_culling_preprocess
                    .pipeline_id
            };

            // Fetch the pipeline.
            let Some(preprocess_pipeline_id) = maybe_pipeline_id else {
                warn!("The build mesh uniforms pipeline wasn't ready");
                continue;
            };

            let Some(preprocess_pipeline) =
                pipeline_cache.get_compute_pipeline(preprocess_pipeline_id)
            else {
                // This will happen while the pipeline is still compiling.
                continue;
            };

            compute_pass.set_pipeline(preprocess_pipeline);

            // Loop over each phase that has work items for this view.
            for (phase_type_id, batched_phase_instance_buffers) in
                &batched_instance_buffers.phase_instance_buffers
            {
                let Some(work_item_buffers) = batched_phase_instance_buffers
                    .work_item_buffers
                    .get(&view.retained_view_entity)
                else {
                    continue;
                };

                let Some(phase_bind_groups) = bind_groups.get(phase_type_id) else {
                    continue;
                };

                // The only dynamic offset is the view uniform offset.
                let dynamic_offsets = [view_uniform_offset.offset];

                match *phase_bind_groups {
                    PhasePreprocessBindGroups::Direct(ref bind_group) => {
                        // Transform the meshes, without any culling.
                        let PreprocessWorkItemBuffers::Direct(work_item_buffer) = work_item_buffers
                        else {
                            continue;
                        };
                        compute_pass.set_bind_group(0, bind_group, &dynamic_offsets);
                        let workgroup_count = work_item_buffer.len().div_ceil(WORKGROUP_SIZE);
                        if workgroup_count > 0 {
                            compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1);
                        }
                    }

                    PhasePreprocessBindGroups::IndirectFrustumCulling {
                        indexed: ref maybe_indexed_bind_group,
                        non_indexed: ref maybe_non_indexed_bind_group,
                    }
                    | PhasePreprocessBindGroups::IndirectOcclusionCulling {
                        early_indexed: ref maybe_indexed_bind_group,
                        early_non_indexed: ref maybe_non_indexed_bind_group,
                        ..
                    } => {
                        // Transform and cull the meshes.
                        let PreprocessWorkItemBuffers::Indirect {
                            indexed: indexed_buffer,
                            non_indexed: non_indexed_buffer,
                            ..
                        } = work_item_buffers
                        else {
                            continue;
                        };

                        // Process the indexed meshes, if there are any.
                        if let Some(indexed_bind_group) = maybe_indexed_bind_group {
                            if let PreprocessWorkItemBuffers::Indirect {
                                gpu_occlusion_culling:
                                    Some(GpuOcclusionCullingWorkItemBuffers {
                                        late_indirect_parameters_indexed_offset,
                                        ..
                                    }),
                                ..
                            } = *work_item_buffers
                            {
                                compute_pass.set_push_constants(
                                    0,
                                    bytemuck::bytes_of(&late_indirect_parameters_indexed_offset),
                                );
                            }

                            compute_pass.set_bind_group(0, indexed_bind_group, &dynamic_offsets);
                            let workgroup_count = indexed_buffer.len().div_ceil(WORKGROUP_SIZE);
                            if workgroup_count > 0 {
                                compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1);
                            }
                        }

                        // Process the non-indexed meshes, if there are any.
                        if let Some(non_indexed_bind_group) = maybe_non_indexed_bind_group {
                            if let PreprocessWorkItemBuffers::Indirect {
                                gpu_occlusion_culling:
                                    Some(GpuOcclusionCullingWorkItemBuffers {
                                        late_indirect_parameters_non_indexed_offset,
                                        ..
                                    }),
                                ..
                            } = *work_item_buffers
                            {
                                compute_pass.set_push_constants(
                                    0,
                                    bytemuck::bytes_of(
                                        &late_indirect_parameters_non_indexed_offset,
                                    ),
                                );
                            }

                            compute_pass.set_bind_group(
                                0,
                                non_indexed_bind_group,
                                &dynamic_offsets,
                            );
                            let workgroup_count = non_indexed_buffer.len().div_ceil(WORKGROUP_SIZE);
                            if workgroup_count > 0 {
                                compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1);
                            }
                        }
                    }
                }
            }
        }

        Ok(())
    }
}

impl FromWorld for EarlyPrepassBuildIndirectParametersNode {
    fn from_world(world: &mut World) -> Self {
        Self {
            view_query: QueryState::new(world),
        }
    }
}

impl FromWorld for LatePrepassBuildIndirectParametersNode {
    fn from_world(world: &mut World) -> Self {
        Self {
            view_query: QueryState::new(world),
        }
    }
}

impl FromWorld for MainBuildIndirectParametersNode {
    fn from_world(world: &mut World) -> Self {
        Self {
            view_query: QueryState::new(world),
        }
    }
}

impl FromWorld for LateGpuPreprocessNode {
    fn from_world(world: &mut World) -> Self {
        Self {
            view_query: QueryState::new(world),
        }
    }
}

impl Node for LateGpuPreprocessNode {
    fn update(&mut self, world: &mut World) {
        self.view_query.update_archetypes(world);
    }

    fn run<'w>(
        &self,
        _: &mut RenderGraphContext,
        render_context: &mut RenderContext<'w>,
        world: &'w World,
    ) -> Result<(), NodeRunError> {
        // Grab the [`BatchedInstanceBuffers`].
        let batched_instance_buffers =
            world.resource::<BatchedInstanceBuffers<MeshUniform, MeshInputUniform>>();

        let pipeline_cache = world.resource::<PipelineCache>();
        let preprocess_pipelines = world.resource::<PreprocessPipelines>();

        let mut compute_pass =
            render_context
                .command_encoder()
                .begin_compute_pass(&ComputePassDescriptor {
                    label: Some("late mesh preprocessing"),
                    timestamp_writes: None,
                });

        // Run the late preprocessing shader for each view.
        for (view, bind_groups, view_uniform_offset) in self.view_query.iter_manual(world) {
            let maybe_pipeline_id = preprocess_pipelines
                .late_gpu_occlusion_culling_preprocess
                .pipeline_id;

            // Fetch the pipeline.
            let Some(preprocess_pipeline_id) = maybe_pipeline_id else {
                warn!("The build mesh uniforms pipeline wasn't ready");
                return Ok(());
            };

            let Some(preprocess_pipeline) =
                pipeline_cache.get_compute_pipeline(preprocess_pipeline_id)
            else {
                // This will happen while the pipeline is still compiling.
                return Ok(());
            };

            compute_pass.set_pipeline(preprocess_pipeline);

            // Loop over each phase that has work items for this view.
            for (phase_type_id, batched_phase_instance_buffers) in
                &batched_instance_buffers.phase_instance_buffers
            {
                let UntypedPhaseBatchedInstanceBuffers {
                    ref work_item_buffers,
                    ref late_indexed_indirect_parameters_buffer,
                    ref late_non_indexed_indirect_parameters_buffer,
                    ..
                } = *batched_phase_instance_buffers;

                // Grab the work item buffers for this view.
                let Some(phase_work_item_buffers) =
                    work_item_buffers.get(&view.retained_view_entity)
                else {
                    continue;
                };

                let (
                    PreprocessWorkItemBuffers::Indirect {
                        gpu_occlusion_culling:
                            Some(GpuOcclusionCullingWorkItemBuffers {
                                late_indirect_parameters_indexed_offset,
                                late_indirect_parameters_non_indexed_offset,
                                ..
                            }),
                        ..
                    },
                    Some(PhasePreprocessBindGroups::IndirectOcclusionCulling {
                        late_indexed: maybe_late_indexed_bind_group,
                        late_non_indexed: maybe_late_non_indexed_bind_group,
                        ..
                    }),
                    Some(late_indexed_indirect_parameters_buffer),
                    Some(late_non_indexed_indirect_parameters_buffer),
                ) = (
                    phase_work_item_buffers,
                    bind_groups.get(phase_type_id),
                    late_indexed_indirect_parameters_buffer.buffer(),
                    late_non_indexed_indirect_parameters_buffer.buffer(),
                )
                else {
                    continue;
                };

                let mut dynamic_offsets: SmallVec<[u32; 1]> = smallvec![];
                dynamic_offsets.push(view_uniform_offset.offset);

                // Process the indexed meshes, if there are any. The work item
                // count came from the GPU, so we dispatch indirectly.
                if let Some(late_indexed_bind_group) = maybe_late_indexed_bind_group {
                    compute_pass.set_push_constants(
                        0,
                        bytemuck::bytes_of(late_indirect_parameters_indexed_offset),
                    );

                    compute_pass.set_bind_group(0, late_indexed_bind_group, &dynamic_offsets);
                    compute_pass.dispatch_workgroups_indirect(
                        late_indexed_indirect_parameters_buffer,
                        (*late_indirect_parameters_indexed_offset as u64)
                            * (size_of::<LatePreprocessWorkItemIndirectParameters>() as u64),
                    );
                }

                // Process the non-indexed meshes, if there are any.
                if let Some(late_non_indexed_bind_group) = maybe_late_non_indexed_bind_group {
                    compute_pass.set_push_constants(
                        0,
                        bytemuck::bytes_of(late_indirect_parameters_non_indexed_offset),
                    );

                    compute_pass.set_bind_group(0, late_non_indexed_bind_group, &dynamic_offsets);
                    compute_pass.dispatch_workgroups_indirect(
                        late_non_indexed_indirect_parameters_buffer,
                        (*late_indirect_parameters_non_indexed_offset as u64)
                            * (size_of::<LatePreprocessWorkItemIndirectParameters>() as u64),
                    );
                }
            }
        }

        Ok(())
    }
}

impl Node for EarlyPrepassBuildIndirectParametersNode {
    fn update(&mut self, world: &mut World) {
        self.view_query.update_archetypes(world);
    }

    fn run<'w>(
        &self,
        _: &mut RenderGraphContext,
        render_context: &mut RenderContext<'w>,
        world: &'w World,
    ) -> Result<(), NodeRunError> {
        let preprocess_pipelines = world.resource::<PreprocessPipelines>();

        // If no view qualifies, we don't need to run this pass at all.
        if self.view_query.iter_manual(world).next().is_none() {
            return Ok(());
        }

        run_build_indirect_parameters_node(
            render_context,
            world,
            &preprocess_pipelines.early_phase,
            "early prepass indirect parameters building",
        )
    }
}

impl Node for LatePrepassBuildIndirectParametersNode {
    fn update(&mut self, world: &mut World) {
        self.view_query.update_archetypes(world);
    }

    fn run<'w>(
        &self,
        _: &mut RenderGraphContext,
        render_context: &mut RenderContext<'w>,
        world: &'w World,
    ) -> Result<(), NodeRunError> {
        let preprocess_pipelines = world.resource::<PreprocessPipelines>();

        // If no view qualifies, we don't need to run this pass at all.
        if self.view_query.iter_manual(world).next().is_none() {
            return Ok(());
        }

        run_build_indirect_parameters_node(
            render_context,
            world,
            &preprocess_pipelines.late_phase,
            "late prepass indirect parameters building",
        )
    }
}

impl Node for MainBuildIndirectParametersNode {
    fn update(&mut self, world: &mut World) {
        self.view_query.update_archetypes(world);
    }

    fn run<'w>(
        &self,
        _: &mut RenderGraphContext,
        render_context: &mut RenderContext<'w>,
        world: &'w World,
    ) -> Result<(), NodeRunError> {
        let preprocess_pipelines = world.resource::<PreprocessPipelines>();

        run_build_indirect_parameters_node(
            render_context,
            world,
            &preprocess_pipelines.main_phase,
            "main indirect parameters building",
        )
    }
}

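/// Runs the reset-batch-sets and build-indirect-parameters compute passes for
/// one phase of the frame (early, late, or main), over every phase type that
/// has bind groups.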
fn run_build_indirect_parameters_node(
    render_context: &mut RenderContext,
    world: &World,
    preprocess_phase_pipelines: &PreprocessPhasePipelines,
    label: &'static str,
) -> Result<(), NodeRunError> {
    let Some(build_indirect_params_bind_groups) =
        world.get_resource::<BuildIndirectParametersBindGroups>()
    else {
        return Ok(());
    };

    let pipeline_cache = world.resource::<PipelineCache>();
    let indirect_parameters_buffers = world.resource::<IndirectParametersBuffers>();

    let mut compute_pass =
        render_context
            .command_encoder()
            .begin_compute_pass(&ComputePassDescriptor {
                label: Some(label),
                timestamp_writes: None,
            });

    // Fetch the pipeline IDs.
    let (
        Some(reset_indirect_batch_sets_pipeline_id),
        Some(build_indexed_indirect_params_pipeline_id),
        Some(build_non_indexed_indirect_params_pipeline_id),
    ) = (
        preprocess_phase_pipelines
            .reset_indirect_batch_sets
            .pipeline_id,
        preprocess_phase_pipelines
            .gpu_occlusion_culling_build_indexed_indirect_params
            .pipeline_id,
        preprocess_phase_pipelines
            .gpu_occlusion_culling_build_non_indexed_indirect_params
            .pipeline_id,
    )
    else {
        warn!("The build indirect parameters pipelines weren't ready");
        return Ok(());
    };

    // Fetch the compiled pipelines themselves.
    let (
        Some(reset_indirect_batch_sets_pipeline),
        Some(build_indexed_indirect_params_pipeline),
        Some(build_non_indexed_indirect_params_pipeline),
    ) = (
        pipeline_cache.get_compute_pipeline(reset_indirect_batch_sets_pipeline_id),
        pipeline_cache.get_compute_pipeline(build_indexed_indirect_params_pipeline_id),
        pipeline_cache.get_compute_pipeline(build_non_indexed_indirect_params_pipeline_id),
    )
    else {
        // This will happen while the pipelines are still compiling.
        return Ok(());
    };

    // Loop over each phase and build out its indirect parameters.
    for (phase_type_id, phase_build_indirect_params_bind_groups) in
        build_indirect_params_bind_groups.iter()
    {
        let Some(phase_indirect_parameters_buffers) =
            indirect_parameters_buffers.get(phase_type_id)
        else {
            continue;
        };

        // Reset the batch set counts and build the indirect parameters for
        // indexed meshes.
        if let (
            Some(reset_indexed_indirect_batch_sets_bind_group),
            Some(build_indirect_indexed_params_bind_group),
        ) = (
            &phase_build_indirect_params_bind_groups.reset_indexed_indirect_batch_sets,
            &phase_build_indirect_params_bind_groups.build_indexed_indirect,
        ) {
            compute_pass.set_pipeline(reset_indirect_batch_sets_pipeline);
            compute_pass.set_bind_group(0, reset_indexed_indirect_batch_sets_bind_group, &[]);
            let workgroup_count = phase_indirect_parameters_buffers
                .batch_set_count(true)
                .div_ceil(WORKGROUP_SIZE);
            if workgroup_count > 0 {
                compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1);
            }

            compute_pass.set_pipeline(build_indexed_indirect_params_pipeline);
            compute_pass.set_bind_group(0, build_indirect_indexed_params_bind_group, &[]);
            let workgroup_count = phase_indirect_parameters_buffers
                .indexed
                .batch_count()
                .div_ceil(WORKGROUP_SIZE);
            if workgroup_count > 0 {
                compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1);
            }
        }

        // Likewise, for non-indexed meshes.
        if let (
            Some(reset_non_indexed_indirect_batch_sets_bind_group),
            Some(build_indirect_non_indexed_params_bind_group),
        ) = (
            &phase_build_indirect_params_bind_groups.reset_non_indexed_indirect_batch_sets,
            &phase_build_indirect_params_bind_groups.build_non_indexed_indirect,
        ) {
            compute_pass.set_pipeline(reset_indirect_batch_sets_pipeline);
            compute_pass.set_bind_group(0, reset_non_indexed_indirect_batch_sets_bind_group, &[]);
            let workgroup_count = phase_indirect_parameters_buffers
                .batch_set_count(false)
                .div_ceil(WORKGROUP_SIZE);
            if workgroup_count > 0 {
                compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1);
            }

            compute_pass.set_pipeline(build_non_indexed_indirect_params_pipeline);
            compute_pass.set_bind_group(0, build_indirect_non_indexed_params_bind_group, &[]);
            let workgroup_count = phase_indirect_parameters_buffers
                .non_indexed
                .batch_count()
                .div_ceil(WORKGROUP_SIZE);
            if workgroup_count > 0 {
                compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1);
            }
        }
    }

    Ok(())
}

impl PreprocessPipelines {
    /// Returns true if all the pipelines that the current level of GPU
    /// preprocessing support requires have been compiled.
    pub(crate) fn pipelines_are_loaded(
        &self,
        pipeline_cache: &PipelineCache,
        preprocessing_support: &GpuPreprocessingSupport,
    ) -> bool {
        match preprocessing_support.max_supported_mode {
            GpuPreprocessingMode::None => false,
            GpuPreprocessingMode::PreprocessingOnly => {
                self.direct_preprocess.is_loaded(pipeline_cache)
                    && self
                        .gpu_frustum_culling_preprocess
                        .is_loaded(pipeline_cache)
            }
            GpuPreprocessingMode::Culling => {
                self.direct_preprocess.is_loaded(pipeline_cache)
                    && self
                        .gpu_frustum_culling_preprocess
                        .is_loaded(pipeline_cache)
                    && self
                        .early_gpu_occlusion_culling_preprocess
                        .is_loaded(pipeline_cache)
                    && self
                        .late_gpu_occlusion_culling_preprocess
                        .is_loaded(pipeline_cache)
                    && self
                        .gpu_frustum_culling_build_indexed_indirect_params
                        .is_loaded(pipeline_cache)
                    && self
                        .gpu_frustum_culling_build_non_indexed_indirect_params
                        .is_loaded(pipeline_cache)
                    && self.early_phase.is_loaded(pipeline_cache)
                    && self.late_phase.is_loaded(pipeline_cache)
                    && self.main_phase.is_loaded(pipeline_cache)
            }
        }
    }
}

impl PreprocessPhasePipelines {
    fn is_loaded(&self, pipeline_cache: &PipelineCache) -> bool {
        self.reset_indirect_batch_sets.is_loaded(pipeline_cache)
            && self
                .gpu_occlusion_culling_build_indexed_indirect_params
                .is_loaded(pipeline_cache)
            && self
                .gpu_occlusion_culling_build_non_indexed_indirect_params
                .is_loaded(pipeline_cache)
    }
}

impl PreprocessPipeline {
    /// Returns true if the compute pipeline has been compiled.
    fn is_loaded(&self, pipeline_cache: &PipelineCache) -> bool {
        self.pipeline_id
            .is_some_and(|pipeline_id| pipeline_cache.get_compute_pipeline(pipeline_id).is_some())
    }
}

impl ResetIndirectBatchSetsPipeline {
    /// Returns true if the compute pipeline has been compiled.
    fn is_loaded(&self, pipeline_cache: &PipelineCache) -> bool {
        self.pipeline_id
            .is_some_and(|pipeline_id| pipeline_cache.get_compute_pipeline(pipeline_id).is_some())
    }
}

impl BuildIndirectParametersPipeline {
    /// Returns true if the compute pipeline has been compiled.
    fn is_loaded(&self, pipeline_cache: &PipelineCache) -> bool {
        self.pipeline_id
            .is_some_and(|pipeline_id| pipeline_cache.get_compute_pipeline(pipeline_id).is_some())
    }
}

impl SpecializedComputePipeline for PreprocessPipeline {
    type Key = PreprocessPipelineKey;

    fn specialize(&self, key: Self::Key) -> ComputePipelineDescriptor {
        let mut shader_defs = vec!["WRITE_INDIRECT_PARAMETERS_METADATA".into()];
        if key.contains(PreprocessPipelineKey::FRUSTUM_CULLING) {
            shader_defs.push("INDIRECT".into());
            shader_defs.push("FRUSTUM_CULLING".into());
        }
        if key.contains(PreprocessPipelineKey::OCCLUSION_CULLING) {
            shader_defs.push("OCCLUSION_CULLING".into());
            if key.contains(PreprocessPipelineKey::EARLY_PHASE) {
                shader_defs.push("EARLY_PHASE".into());
            } else {
                shader_defs.push("LATE_PHASE".into());
            }
        }

        ComputePipelineDescriptor {
            label: Some(
                format!(
                    "mesh preprocessing ({})",
                    if key.contains(
                        PreprocessPipelineKey::OCCLUSION_CULLING
                            | PreprocessPipelineKey::EARLY_PHASE
                    ) {
                        "early GPU occlusion culling"
                    } else if key.contains(PreprocessPipelineKey::OCCLUSION_CULLING) {
                        "late GPU occlusion culling"
                    } else if key.contains(PreprocessPipelineKey::FRUSTUM_CULLING) {
                        "GPU frustum culling"
                    } else {
                        "direct"
                    }
                )
                .into(),
            ),
            layout: vec![self.bind_group_layout.clone()],
            push_constant_ranges: if key.contains(PreprocessPipelineKey::OCCLUSION_CULLING) {
                vec![PushConstantRange {
                    stages: ShaderStages::COMPUTE,
                    range: 0..4,
                }]
            } else {
                vec![]
            },
            shader: MESH_PREPROCESS_SHADER_HANDLE,
            shader_defs,
            entry_point: "main".into(),
            zero_initialize_workgroup_memory: false,
        }
    }
}

impl FromWorld for PreprocessPipelines {
    fn from_world(world: &mut World) -> Self {
        let render_device = world.resource::<RenderDevice>();

        let direct_bind_group_layout_entries = preprocess_direct_bind_group_layout_entries();
        // The culling bind group layouts are supersets of the direct one.
        let gpu_frustum_culling_bind_group_layout_entries =
            gpu_culling_bind_group_layout_entries();
        let gpu_early_occlusion_culling_bind_group_layout_entries =
            gpu_occlusion_culling_bind_group_layout_entries()
                .extend_with_indices(((11, storage_buffer::<PreprocessWorkItem>(false)),));
        let gpu_late_occlusion_culling_bind_group_layout_entries =
            gpu_occlusion_culling_bind_group_layout_entries();

        let reset_indirect_batch_sets_bind_group_layout_entries =
            DynamicBindGroupLayoutEntries::sequential(
                ShaderStages::COMPUTE,
                (storage_buffer::<IndirectBatchSet>(false),),
            );

        // The indexed and non-indexed layouts differ only in the type of the
        // buffer that the shader writes the draw commands into.
        let build_indexed_indirect_params_bind_group_layout_entries =
            build_indirect_params_bind_group_layout_entries()
                .extend_sequential((storage_buffer::<IndirectParametersIndexed>(false),));
        let build_non_indexed_indirect_params_bind_group_layout_entries =
            build_indirect_params_bind_group_layout_entries()
                .extend_sequential((storage_buffer::<IndirectParametersNonIndexed>(false),));

        // Create the bind group layouts.
        let direct_bind_group_layout = render_device.create_bind_group_layout(
            "build mesh uniforms direct bind group layout",
            &direct_bind_group_layout_entries,
        );
        let gpu_frustum_culling_bind_group_layout = render_device.create_bind_group_layout(
            "build mesh uniforms GPU frustum culling bind group layout",
            &gpu_frustum_culling_bind_group_layout_entries,
        );
        let gpu_early_occlusion_culling_bind_group_layout = render_device.create_bind_group_layout(
            "build mesh uniforms GPU early occlusion culling bind group layout",
            &gpu_early_occlusion_culling_bind_group_layout_entries,
        );
        let gpu_late_occlusion_culling_bind_group_layout = render_device.create_bind_group_layout(
            "build mesh uniforms GPU late occlusion culling bind group layout",
            &gpu_late_occlusion_culling_bind_group_layout_entries,
        );
        let reset_indirect_batch_sets_bind_group_layout = render_device.create_bind_group_layout(
            "reset indirect batch sets bind group layout",
            &reset_indirect_batch_sets_bind_group_layout_entries,
        );
        let build_indexed_indirect_params_bind_group_layout = render_device
            .create_bind_group_layout(
                "build indexed indirect parameters bind group layout",
                &build_indexed_indirect_params_bind_group_layout_entries,
            );
        let build_non_indexed_indirect_params_bind_group_layout = render_device
            .create_bind_group_layout(
                "build non-indexed indirect parameters bind group layout",
                &build_non_indexed_indirect_params_bind_group_layout_entries,
            );

        let preprocess_phase_pipelines = PreprocessPhasePipelines {
            reset_indirect_batch_sets: ResetIndirectBatchSetsPipeline {
                bind_group_layout: reset_indirect_batch_sets_bind_group_layout.clone(),
                pipeline_id: None,
            },
            gpu_occlusion_culling_build_indexed_indirect_params: BuildIndirectParametersPipeline {
                bind_group_layout: build_indexed_indirect_params_bind_group_layout.clone(),
                pipeline_id: None,
            },
            gpu_occlusion_culling_build_non_indexed_indirect_params:
                BuildIndirectParametersPipeline {
                    bind_group_layout: build_non_indexed_indirect_params_bind_group_layout.clone(),
                    pipeline_id: None,
                },
        };

        PreprocessPipelines {
            direct_preprocess: PreprocessPipeline {
                bind_group_layout: direct_bind_group_layout,
                pipeline_id: None,
            },
            gpu_frustum_culling_preprocess: PreprocessPipeline {
                bind_group_layout: gpu_frustum_culling_bind_group_layout,
                pipeline_id: None,
            },
            early_gpu_occlusion_culling_preprocess: PreprocessPipeline {
                bind_group_layout: gpu_early_occlusion_culling_bind_group_layout,
                pipeline_id: None,
            },
            late_gpu_occlusion_culling_preprocess: PreprocessPipeline {
                bind_group_layout: gpu_late_occlusion_culling_bind_group_layout,
                pipeline_id: None,
            },
            gpu_frustum_culling_build_indexed_indirect_params: BuildIndirectParametersPipeline {
                bind_group_layout: build_indexed_indirect_params_bind_group_layout.clone(),
                pipeline_id: None,
            },
            gpu_frustum_culling_build_non_indexed_indirect_params:
                BuildIndirectParametersPipeline {
                    bind_group_layout: build_non_indexed_indirect_params_bind_group_layout.clone(),
                    pipeline_id: None,
                },
            early_phase: preprocess_phase_pipelines.clone(),
            late_phase: preprocess_phase_pipelines.clone(),
            main_phase: preprocess_phase_pipelines.clone(),
        }
    }
}

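/// Returns the bind group layout entries that the mesh preprocessing shader
/// needs in direct mode, in which no culling takes place.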
fn preprocess_direct_bind_group_layout_entries() -> DynamicBindGroupLayoutEntries {
    DynamicBindGroupLayoutEntries::new_with_indices(
        ShaderStages::COMPUTE,
        (
            // 0: the current view uniform
            (0, uniform_buffer::<ViewUniform>(true)),
            // 3: the current frame's `MeshInputUniform`s
            (3, storage_buffer_read_only::<MeshInputUniform>(false)),
            // 4: the previous frame's `MeshInputUniform`s
            (4, storage_buffer_read_only::<MeshInputUniform>(false)),
            // 5: the work items, one per mesh instance to process
            (5, storage_buffer_read_only::<PreprocessWorkItem>(false)),
            // 6: the output `MeshUniform`s
            (6, storage_buffer::<MeshUniform>(false)),
        ),
    )
}

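/// Returns the bind group layout entries shared by both variants of the
/// indirect-parameters building shader.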
fn build_indirect_params_bind_group_layout_entries() -> DynamicBindGroupLayoutEntries {
    DynamicBindGroupLayoutEntries::new_with_indices(
        ShaderStages::COMPUTE,
        (
            (0, storage_buffer_read_only::<MeshInputUniform>(false)),
            (
                1,
                storage_buffer_read_only::<IndirectParametersCpuMetadata>(false),
            ),
            (
                2,
                storage_buffer_read_only::<IndirectParametersGpuMetadata>(false),
            ),
            (3, storage_buffer::<IndirectBatchSet>(false)),
        ),
    )
}

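/// Returns the bind group layout entries that the mesh preprocessing shader
/// needs when GPU frustum culling is enabled.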
fn gpu_culling_bind_group_layout_entries() -> DynamicBindGroupLayoutEntries {
    // The GPU culling bind group entries are a superset of the direct ones.
    preprocess_direct_bind_group_layout_entries().extend_with_indices((
        // 7: the CPU-populated indirect parameters metadata
        (
            7,
            storage_buffer_read_only::<IndirectParametersCpuMetadata>(false),
        ),
        // 8: the GPU-populated indirect parameters metadata
        (8, storage_buffer::<IndirectParametersGpuMetadata>(false)),
        // 9: the per-instance bounding data used for culling
        (9, storage_buffer_read_only::<MeshCullingData>(false)),
    ))
}

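/// Returns the bind group layout entries that the mesh preprocessing shader
/// needs when GPU two-phase occlusion culling is enabled.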
fn gpu_occlusion_culling_bind_group_layout_entries() -> DynamicBindGroupLayoutEntries {
    gpu_culling_bind_group_layout_entries().extend_with_indices((
        // 2: the previous frame's view uniform
        (2, uniform_buffer::<PreviousViewData>(false)),
        // 10: the depth pyramid to test against
        (
            10,
            texture_2d(TextureSampleType::Float { filterable: true }),
        ),
        // 12: the dispatch parameters for the late preprocessing phase
        (
            12,
            storage_buffer::<LatePreprocessWorkItemIndirectParameters>(false),
        ),
    ))
}

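/// A system that specializes and caches the mesh preprocessing and
/// indirect-parameters pipelines that the current GPU supports.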
pub fn prepare_preprocess_pipelines(
    pipeline_cache: Res<PipelineCache>,
    render_device: Res<RenderDevice>,
    mut specialized_preprocess_pipelines: ResMut<SpecializedComputePipelines<PreprocessPipeline>>,
    mut specialized_reset_indirect_batch_sets_pipelines: ResMut<
        SpecializedComputePipelines<ResetIndirectBatchSetsPipeline>,
    >,
    mut specialized_build_indirect_parameters_pipelines: ResMut<
        SpecializedComputePipelines<BuildIndirectParametersPipeline>,
    >,
    preprocess_pipelines: ResMut<PreprocessPipelines>,
    gpu_preprocessing_support: Res<GpuPreprocessingSupport>,
) {
    let preprocess_pipelines = preprocess_pipelines.into_inner();

    preprocess_pipelines.direct_preprocess.prepare(
        &pipeline_cache,
        &mut specialized_preprocess_pipelines,
        PreprocessPipelineKey::empty(),
    );
    preprocess_pipelines.gpu_frustum_culling_preprocess.prepare(
        &pipeline_cache,
        &mut specialized_preprocess_pipelines,
        PreprocessPipelineKey::FRUSTUM_CULLING,
    );

    if gpu_preprocessing_support.is_culling_supported() {
        preprocess_pipelines
            .early_gpu_occlusion_culling_preprocess
            .prepare(
                &pipeline_cache,
                &mut specialized_preprocess_pipelines,
                PreprocessPipelineKey::FRUSTUM_CULLING
                    | PreprocessPipelineKey::OCCLUSION_CULLING
                    | PreprocessPipelineKey::EARLY_PHASE,
            );
        preprocess_pipelines
            .late_gpu_occlusion_culling_preprocess
            .prepare(
                &pipeline_cache,
                &mut specialized_preprocess_pipelines,
                PreprocessPipelineKey::FRUSTUM_CULLING | PreprocessPipelineKey::OCCLUSION_CULLING,
            );
    }

    let mut build_indirect_parameters_pipeline_key = BuildIndirectParametersPipelineKey::empty();

    // If the GPU and driver support `multi_draw_indirect_count`, tell the
    // indirect-parameters building shader about it.
    if render_device
        .wgpu_device()
        .features()
        .contains(WgpuFeatures::MULTI_DRAW_INDIRECT_COUNT)
    {
        build_indirect_parameters_pipeline_key
            .insert(BuildIndirectParametersPipelineKey::MULTI_DRAW_INDIRECT_COUNT_SUPPORTED);
    }

    preprocess_pipelines
        .gpu_frustum_culling_build_indexed_indirect_params
        .prepare(
            &pipeline_cache,
            &mut specialized_build_indirect_parameters_pipelines,
            build_indirect_parameters_pipeline_key | BuildIndirectParametersPipelineKey::INDEXED,
        );
    preprocess_pipelines
        .gpu_frustum_culling_build_non_indexed_indirect_params
        .prepare(
            &pipeline_cache,
            &mut specialized_build_indirect_parameters_pipelines,
            build_indirect_parameters_pipeline_key,
        );

    if !gpu_preprocessing_support.is_culling_supported() {
        return;
    }

    for (preprocess_phase_pipelines, build_indirect_parameters_phase_pipeline_key) in [
        (
            &mut preprocess_pipelines.early_phase,
            BuildIndirectParametersPipelineKey::EARLY_PHASE,
        ),
        (
            &mut preprocess_pipelines.late_phase,
            BuildIndirectParametersPipelineKey::LATE_PHASE,
        ),
        (
            &mut preprocess_pipelines.main_phase,
            BuildIndirectParametersPipelineKey::MAIN_PHASE,
        ),
    ] {
        preprocess_phase_pipelines
            .reset_indirect_batch_sets
            .prepare(
                &pipeline_cache,
                &mut specialized_reset_indirect_batch_sets_pipelines,
            );
        preprocess_phase_pipelines
            .gpu_occlusion_culling_build_indexed_indirect_params
            .prepare(
                &pipeline_cache,
                &mut specialized_build_indirect_parameters_pipelines,
                build_indirect_parameters_pipeline_key
                    | build_indirect_parameters_phase_pipeline_key
                    | BuildIndirectParametersPipelineKey::INDEXED
                    | BuildIndirectParametersPipelineKey::OCCLUSION_CULLING,
            );
        preprocess_phase_pipelines
            .gpu_occlusion_culling_build_non_indexed_indirect_params
            .prepare(
                &pipeline_cache,
                &mut specialized_build_indirect_parameters_pipelines,
                build_indirect_parameters_pipeline_key
                    | build_indirect_parameters_phase_pipeline_key
                    | BuildIndirectParametersPipelineKey::OCCLUSION_CULLING,
            );
    }
}

impl PreprocessPipeline {
    fn prepare(
        &mut self,
        pipeline_cache: &PipelineCache,
        pipelines: &mut SpecializedComputePipelines<PreprocessPipeline>,
        key: PreprocessPipelineKey,
    ) {
        if self.pipeline_id.is_some() {
            return;
        }

        let preprocess_pipeline_id = pipelines.specialize(pipeline_cache, self, key);
        self.pipeline_id = Some(preprocess_pipeline_id);
    }
}

impl SpecializedComputePipeline for ResetIndirectBatchSetsPipeline {
    type Key = ();

    fn specialize(&self, _: Self::Key) -> ComputePipelineDescriptor {
        ComputePipelineDescriptor {
            label: Some("reset indirect batch sets".into()),
            layout: vec![self.bind_group_layout.clone()],
            push_constant_ranges: vec![],
            shader: RESET_INDIRECT_BATCH_SETS_SHADER_HANDLE,
            shader_defs: vec![],
            entry_point: "main".into(),
            zero_initialize_workgroup_memory: false,
        }
    }
}

impl SpecializedComputePipeline for BuildIndirectParametersPipeline {
    type Key = BuildIndirectParametersPipelineKey;

    fn specialize(&self, key: Self::Key) -> ComputePipelineDescriptor {
        let mut shader_defs = vec![];
        if key.contains(BuildIndirectParametersPipelineKey::INDEXED) {
            shader_defs.push("INDEXED".into());
        }
        if key.contains(BuildIndirectParametersPipelineKey::MULTI_DRAW_INDIRECT_COUNT_SUPPORTED) {
            shader_defs.push("MULTI_DRAW_INDIRECT_COUNT_SUPPORTED".into());
        }
        if key.contains(BuildIndirectParametersPipelineKey::OCCLUSION_CULLING) {
            shader_defs.push("OCCLUSION_CULLING".into());
        }
        if key.contains(BuildIndirectParametersPipelineKey::EARLY_PHASE) {
            shader_defs.push("EARLY_PHASE".into());
        }
        if key.contains(BuildIndirectParametersPipelineKey::LATE_PHASE) {
            shader_defs.push("LATE_PHASE".into());
        }
        if key.contains(BuildIndirectParametersPipelineKey::MAIN_PHASE) {
            shader_defs.push("MAIN_PHASE".into());
        }

        let label = format!(
            "{} build {}indexed indirect parameters",
            if !key.contains(BuildIndirectParametersPipelineKey::OCCLUSION_CULLING) {
                "frustum culling"
            } else if key.contains(BuildIndirectParametersPipelineKey::EARLY_PHASE) {
                "early occlusion culling"
            } else if key.contains(BuildIndirectParametersPipelineKey::LATE_PHASE) {
                "late occlusion culling"
            } else {
                "main occlusion culling"
            },
            if key.contains(BuildIndirectParametersPipelineKey::INDEXED) {
                ""
            } else {
                "non-"
            }
        );

        ComputePipelineDescriptor {
            label: Some(label.into()),
            layout: vec![self.bind_group_layout.clone()],
            push_constant_ranges: vec![],
            shader: BUILD_INDIRECT_PARAMS_SHADER_HANDLE,
            shader_defs,
            entry_point: "main".into(),
            zero_initialize_workgroup_memory: false,
        }
    }
}

impl ResetIndirectBatchSetsPipeline {
    fn prepare(
        &mut self,
        pipeline_cache: &PipelineCache,
        pipelines: &mut SpecializedComputePipelines<ResetIndirectBatchSetsPipeline>,
    ) {
        if self.pipeline_id.is_some() {
            return;
        }

        let reset_indirect_batch_sets_pipeline_id = pipelines.specialize(pipeline_cache, self, ());
        self.pipeline_id = Some(reset_indirect_batch_sets_pipeline_id);
    }
}

impl BuildIndirectParametersPipeline {
    fn prepare(
        &mut self,
        pipeline_cache: &PipelineCache,
        pipelines: &mut SpecializedComputePipelines<BuildIndirectParametersPipeline>,
        key: BuildIndirectParametersPipelineKey,
    ) {
        if self.pipeline_id.is_some() {
            return;
        }

        let build_indirect_parameters_pipeline_id = pipelines.specialize(pipeline_cache, self, key);
        self.pipeline_id = Some(build_indirect_parameters_pipeline_id);
    }
}

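/// A system that creates the bind groups that the mesh preprocessing and
/// indirect-parameters shaders need for each view and phase this frame.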
#[expect(
    clippy::too_many_arguments,
    reason = "it's a system that needs a lot of arguments"
)]
pub fn prepare_preprocess_bind_groups(
    mut commands: Commands,
    views: Query<(Entity, &ExtractedView)>,
    view_depth_pyramids: Query<(&ViewDepthPyramid, &PreviousViewUniformOffset)>,
    render_device: Res<RenderDevice>,
    batched_instance_buffers: Res<BatchedInstanceBuffers<MeshUniform, MeshInputUniform>>,
    indirect_parameters_buffers: Res<IndirectParametersBuffers>,
    mesh_culling_data_buffer: Res<MeshCullingDataBuffer>,
    view_uniforms: Res<ViewUniforms>,
    previous_view_uniforms: Res<PreviousViewUniforms>,
    pipelines: Res<PreprocessPipelines>,
) {
    // Grab the [`BatchedInstanceBuffers`].
    let BatchedInstanceBuffers {
        current_input_buffer: current_input_buffer_vec,
        previous_input_buffer: previous_input_buffer_vec,
        phase_instance_buffers,
    } = batched_instance_buffers.into_inner();

    let (Some(current_input_buffer), Some(previous_input_buffer)) = (
        current_input_buffer_vec.buffer().buffer(),
        previous_input_buffer_vec.buffer().buffer(),
    ) else {
        return;
    };

    // Keep track of whether any phase is drawn indirectly. If one is, we'll
    // need bind groups for the indirect-parameters building shaders too.
    let mut any_indirect = false;

    for (view_entity, view) in &views {
        let mut bind_groups = TypeIdMap::default();

        for (phase_type_id, phase_instance_buffers) in phase_instance_buffers {
            let UntypedPhaseBatchedInstanceBuffers {
                data_buffer: ref data_buffer_vec,
                ref work_item_buffers,
                ref late_indexed_indirect_parameters_buffer,
                ref late_non_indexed_indirect_parameters_buffer,
            } = *phase_instance_buffers;

            let Some(data_buffer) = data_buffer_vec.buffer() else {
                continue;
            };

            // Fetch the indirect parameters buffers for this phase.
            let Some(phase_indirect_parameters_buffers) =
                indirect_parameters_buffers.get(phase_type_id)
            else {
                continue;
            };

            let Some(work_item_buffers) = work_item_buffers.get(&view.retained_view_entity) else {
                continue;
            };

            // Gather the resources the bind group builder needs.
            let preprocess_bind_group_builder = PreprocessBindGroupBuilder {
                view: view_entity,
                late_indexed_indirect_parameters_buffer,
                late_non_indexed_indirect_parameters_buffer,
                render_device: &render_device,
                phase_indirect_parameters_buffers,
                mesh_culling_data_buffer: &mesh_culling_data_buffer,
                view_uniforms: &view_uniforms,
                previous_view_uniforms: &previous_view_uniforms,
                pipelines: &pipelines,
                current_input_buffer,
                previous_input_buffer,
                data_buffer,
            };

            // Build the appropriate bind group, depending on whether we're
            // drawing directly or indirectly.
            let (was_indirect, bind_group) = match *work_item_buffers {
                PreprocessWorkItemBuffers::Direct(ref work_item_buffer) => (
                    false,
                    preprocess_bind_group_builder
                        .create_direct_preprocess_bind_groups(work_item_buffer),
                ),

                PreprocessWorkItemBuffers::Indirect {
                    indexed: ref indexed_work_item_buffer,
                    non_indexed: ref non_indexed_work_item_buffer,
                    gpu_occlusion_culling: Some(ref gpu_occlusion_culling_work_item_buffers),
                } => (
                    true,
                    preprocess_bind_group_builder
                        .create_indirect_occlusion_culling_preprocess_bind_groups(
                            &view_depth_pyramids,
                            indexed_work_item_buffer,
                            non_indexed_work_item_buffer,
                            gpu_occlusion_culling_work_item_buffers,
                        ),
                ),

                PreprocessWorkItemBuffers::Indirect {
                    indexed: ref indexed_work_item_buffer,
                    non_indexed: ref non_indexed_work_item_buffer,
                    gpu_occlusion_culling: None,
                } => (
                    true,
                    preprocess_bind_group_builder
                        .create_indirect_frustum_culling_preprocess_bind_groups(
                            indexed_work_item_buffer,
                            non_indexed_work_item_buffer,
                        ),
                ),
            };

            // Record the bind group for this phase.
            if let Some(bind_group) = bind_group {
                any_indirect = any_indirect || was_indirect;
                bind_groups.insert(*phase_type_id, bind_group);
            }
        }

        // Save the bind groups on the view.
        commands
            .entity(view_entity)
            .insert(PreprocessBindGroups(bind_groups));
    }

    // If any phase is drawn indirectly, create the bind groups for the
    // indirect-parameters building shaders.
    if any_indirect {
        create_build_indirect_parameters_bind_groups(
            &mut commands,
            &render_device,
            &pipelines,
            current_input_buffer,
            &indirect_parameters_buffers,
        );
    }
}

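/// A temporary object that gathers everything needed to build the mesh
/// preprocessing bind groups for a single view and phase.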
struct PreprocessBindGroupBuilder<'a> {
    /// The render-world entity corresponding to the view being processed.
    view: Entity,
    /// The dispatch parameters for the late preprocessing pass on indexed
    /// meshes.
    late_indexed_indirect_parameters_buffer:
        &'a RawBufferVec<LatePreprocessWorkItemIndirectParameters>,
    /// The dispatch parameters for the late preprocessing pass on non-indexed
    /// meshes.
    late_non_indexed_indirect_parameters_buffer:
        &'a RawBufferVec<LatePreprocessWorkItemIndirectParameters>,
    /// The GPU device.
    render_device: &'a RenderDevice,
    /// The indirect parameters buffers for the phase being processed.
    phase_indirect_parameters_buffers: &'a UntypedPhaseIndirectParametersBuffers,
    /// The buffer of per-instance culling data.
    mesh_culling_data_buffer: &'a MeshCullingDataBuffer,
    /// The current view uniforms.
    view_uniforms: &'a ViewUniforms,
    /// The previous frame's view uniforms.
    previous_view_uniforms: &'a PreviousViewUniforms,
    /// The pipelines, and therefore the bind group layouts, for mesh
    /// preprocessing.
    pipelines: &'a PreprocessPipelines,
    /// The buffer containing the current frame's `MeshInputUniform`s.
    current_input_buffer: &'a Buffer,
    /// The buffer containing the previous frame's `MeshInputUniform`s.
    previous_input_buffer: &'a Buffer,
    /// The output buffer that the preprocessing shader writes `MeshUniform`s
    /// into.
    data_buffer: &'a Buffer,
}

impl<'a> PreprocessBindGroupBuilder<'a> {
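    /// Creates the bind group for the preprocessing shader in direct mode, in
    /// which no GPU culling takes place.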
    fn create_direct_preprocess_bind_groups(
        &self,
        work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
    ) -> Option<PhasePreprocessBindGroups> {
        // Don't use `as_entire_binding()` here: the shader reads the array
        // length, and the underlying buffer may be longer than the actual
        // number of work items.
        let work_item_buffer_size = NonZero::<u64>::try_from(
            work_item_buffer.len() as u64 * u64::from(PreprocessWorkItem::min_size()),
        )
        .ok();

        Some(PhasePreprocessBindGroups::Direct(
            self.render_device.create_bind_group(
                "preprocess_direct_bind_group",
                &self.pipelines.direct_preprocess.bind_group_layout,
                &BindGroupEntries::with_indices((
                    (0, self.view_uniforms.uniforms.binding()?),
                    (3, self.current_input_buffer.as_entire_binding()),
                    (4, self.previous_input_buffer.as_entire_binding()),
                    (
                        5,
                        BindingResource::Buffer(BufferBinding {
                            buffer: work_item_buffer.buffer()?,
                            offset: 0,
                            size: work_item_buffer_size,
                        }),
                    ),
                    (6, self.data_buffer.as_entire_binding()),
                )),
            ),
        ))
    }

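    /// Creates the bind groups for the preprocessing shader when GPU
    /// two-phase occlusion culling is in use.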
    fn create_indirect_occlusion_culling_preprocess_bind_groups(
        &self,
        view_depth_pyramids: &Query<(&ViewDepthPyramid, &PreviousViewUniformOffset)>,
        indexed_work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
        non_indexed_work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
        gpu_occlusion_culling_work_item_buffers: &GpuOcclusionCullingWorkItemBuffers,
    ) -> Option<PhasePreprocessBindGroups> {
        let GpuOcclusionCullingWorkItemBuffers {
            late_indexed: ref late_indexed_work_item_buffer,
            late_non_indexed: ref late_non_indexed_work_item_buffer,
            ..
        } = *gpu_occlusion_culling_work_item_buffers;

        let (view_depth_pyramid, previous_view_uniform_offset) =
            view_depth_pyramids.get(self.view).ok()?;

        Some(PhasePreprocessBindGroups::IndirectOcclusionCulling {
            early_indexed: self.create_indirect_occlusion_culling_early_indexed_bind_group(
                view_depth_pyramid,
                previous_view_uniform_offset,
                indexed_work_item_buffer,
                late_indexed_work_item_buffer,
            ),

            early_non_indexed: self.create_indirect_occlusion_culling_early_non_indexed_bind_group(
                view_depth_pyramid,
                previous_view_uniform_offset,
                non_indexed_work_item_buffer,
                late_non_indexed_work_item_buffer,
            ),

            late_indexed: self.create_indirect_occlusion_culling_late_indexed_bind_group(
                view_depth_pyramid,
                previous_view_uniform_offset,
                late_indexed_work_item_buffer,
            ),

            late_non_indexed: self.create_indirect_occlusion_culling_late_non_indexed_bind_group(
                view_depth_pyramid,
                previous_view_uniform_offset,
                late_non_indexed_work_item_buffer,
            ),
        })
    }

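    /// Creates the bind group for the early phase of GPU occlusion culling
    /// for indexed meshes.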
    fn create_indirect_occlusion_culling_early_indexed_bind_group(
        &self,
        view_depth_pyramid: &ViewDepthPyramid,
        previous_view_uniform_offset: &PreviousViewUniformOffset,
        indexed_work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
        late_indexed_work_item_buffer: &UninitBufferVec<PreprocessWorkItem>,
    ) -> Option<BindGroup> {
        let mesh_culling_data_buffer = self.mesh_culling_data_buffer.buffer()?;
        let view_uniforms_binding = self.view_uniforms.uniforms.binding()?;
        let previous_view_buffer = self.previous_view_uniforms.uniforms.buffer()?;

        match (
            self.phase_indirect_parameters_buffers
                .indexed
                .cpu_metadata_buffer(),
            self.phase_indirect_parameters_buffers
                .indexed
                .gpu_metadata_buffer(),
            indexed_work_item_buffer.buffer(),
            late_indexed_work_item_buffer.buffer(),
            self.late_indexed_indirect_parameters_buffer.buffer(),
        ) {
            (
                Some(indexed_cpu_metadata_buffer),
                Some(indexed_gpu_metadata_buffer),
                Some(indexed_work_item_gpu_buffer),
                Some(late_indexed_work_item_gpu_buffer),
                Some(late_indexed_indirect_parameters_buffer),
            ) => {
                // Don't use `as_entire_binding()` here: the shader reads the
                // array length, and the underlying buffer may be longer than
                // the actual number of work items.
                let indexed_work_item_buffer_size = NonZero::<u64>::try_from(
                    indexed_work_item_buffer.len() as u64
                        * u64::from(PreprocessWorkItem::min_size()),
                )
                .ok();

                Some(
                    self.render_device.create_bind_group(
                        "preprocess_early_indexed_gpu_occlusion_culling_bind_group",
                        &self
                            .pipelines
                            .early_gpu_occlusion_culling_preprocess
                            .bind_group_layout,
                        &BindGroupEntries::with_indices((
                            (3, self.current_input_buffer.as_entire_binding()),
                            (4, self.previous_input_buffer.as_entire_binding()),
                            (
                                5,
                                BindingResource::Buffer(BufferBinding {
                                    buffer: indexed_work_item_gpu_buffer,
                                    offset: 0,
                                    size: indexed_work_item_buffer_size,
                                }),
                            ),
                            (6, self.data_buffer.as_entire_binding()),
                            (7, indexed_cpu_metadata_buffer.as_entire_binding()),
                            (8, indexed_gpu_metadata_buffer.as_entire_binding()),
                            (9, mesh_culling_data_buffer.as_entire_binding()),
                            (0, view_uniforms_binding.clone()),
                            (10, &view_depth_pyramid.all_mips),
                            (
                                2,
                                BufferBinding {
                                    buffer: previous_view_buffer,
                                    offset: previous_view_uniform_offset.offset as u64,
                                    size: NonZeroU64::new(size_of::<PreviousViewData>() as u64),
                                },
                            ),
                            (
                                11,
                                BufferBinding {
                                    buffer: late_indexed_work_item_gpu_buffer,
                                    offset: 0,
                                    size: indexed_work_item_buffer_size,
                                },
                            ),
                            (
                                12,
                                BufferBinding {
                                    buffer: late_indexed_indirect_parameters_buffer,
                                    offset: 0,
                                    size: NonZeroU64::new(
                                        late_indexed_indirect_parameters_buffer.size(),
                                    ),
                                },
                            ),
                        )),
                    ),
                )
            }
            _ => None,
        }
    }

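    /// Creates the bind group for the early phase of GPU occlusion culling
    /// for non-indexed meshes.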
    fn create_indirect_occlusion_culling_early_non_indexed_bind_group(
        &self,
        view_depth_pyramid: &ViewDepthPyramid,
        previous_view_uniform_offset: &PreviousViewUniformOffset,
        non_indexed_work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
        late_non_indexed_work_item_buffer: &UninitBufferVec<PreprocessWorkItem>,
    ) -> Option<BindGroup> {
        let mesh_culling_data_buffer = self.mesh_culling_data_buffer.buffer()?;
        let view_uniforms_binding = self.view_uniforms.uniforms.binding()?;
        let previous_view_buffer = self.previous_view_uniforms.uniforms.buffer()?;

        match (
            self.phase_indirect_parameters_buffers
                .non_indexed
                .cpu_metadata_buffer(),
            self.phase_indirect_parameters_buffers
                .non_indexed
                .gpu_metadata_buffer(),
            non_indexed_work_item_buffer.buffer(),
            late_non_indexed_work_item_buffer.buffer(),
            self.late_non_indexed_indirect_parameters_buffer.buffer(),
        ) {
            (
                Some(non_indexed_cpu_metadata_buffer),
                Some(non_indexed_gpu_metadata_buffer),
                Some(non_indexed_work_item_gpu_buffer),
                Some(late_non_indexed_work_item_gpu_buffer),
                Some(late_non_indexed_indirect_parameters_buffer),
            ) => {
                let non_indexed_work_item_buffer_size = NonZero::<u64>::try_from(
                    non_indexed_work_item_buffer.len() as u64
                        * u64::from(PreprocessWorkItem::min_size()),
                )
                .ok();

                Some(
                    self.render_device.create_bind_group(
                        "preprocess_early_non_indexed_gpu_occlusion_culling_bind_group",
                        &self
                            .pipelines
                            .early_gpu_occlusion_culling_preprocess
                            .bind_group_layout,
                        &BindGroupEntries::with_indices((
                            (3, self.current_input_buffer.as_entire_binding()),
                            (4, self.previous_input_buffer.as_entire_binding()),
                            (
                                5,
                                BindingResource::Buffer(BufferBinding {
                                    buffer: non_indexed_work_item_gpu_buffer,
                                    offset: 0,
                                    size: non_indexed_work_item_buffer_size,
                                }),
                            ),
                            (6, self.data_buffer.as_entire_binding()),
                            (7, non_indexed_cpu_metadata_buffer.as_entire_binding()),
                            (8, non_indexed_gpu_metadata_buffer.as_entire_binding()),
                            (9, mesh_culling_data_buffer.as_entire_binding()),
                            (0, view_uniforms_binding.clone()),
                            (10, &view_depth_pyramid.all_mips),
                            (
                                2,
                                BufferBinding {
                                    buffer: previous_view_buffer,
                                    offset: previous_view_uniform_offset.offset as u64,
                                    size: NonZeroU64::new(size_of::<PreviousViewData>() as u64),
                                },
                            ),
                            (
                                11,
                                BufferBinding {
                                    buffer: late_non_indexed_work_item_gpu_buffer,
                                    offset: 0,
                                    size: non_indexed_work_item_buffer_size,
                                },
                            ),
                            (
                                12,
                                BufferBinding {
                                    buffer: late_non_indexed_indirect_parameters_buffer,
                                    offset: 0,
                                    size: NonZeroU64::new(
                                        late_non_indexed_indirect_parameters_buffer.size(),
                                    ),
                                },
                            ),
                        )),
                    ),
                )
            }
            _ => None,
        }
    }

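    /// Creates the bind group for the late mesh preprocessing phase, with GPU
    /// occlusion culling, for indexed meshes. Returns [`None`] if any of the
    /// required buffers haven't been allocated yet.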
    fn create_indirect_occlusion_culling_late_indexed_bind_group(
        &self,
        view_depth_pyramid: &ViewDepthPyramid,
        previous_view_uniform_offset: &PreviousViewUniformOffset,
        late_indexed_work_item_buffer: &UninitBufferVec<PreprocessWorkItem>,
    ) -> Option<BindGroup> {
        let mesh_culling_data_buffer = self.mesh_culling_data_buffer.buffer()?;
        let view_uniforms_binding = self.view_uniforms.uniforms.binding()?;
        let previous_view_buffer = self.previous_view_uniforms.uniforms.buffer()?;

        match (
            self.phase_indirect_parameters_buffers
                .indexed
                .cpu_metadata_buffer(),
            self.phase_indirect_parameters_buffers
                .indexed
                .gpu_metadata_buffer(),
            late_indexed_work_item_buffer.buffer(),
            self.late_indexed_indirect_parameters_buffer.buffer(),
        ) {
            (
                Some(indexed_cpu_metadata_buffer),
                Some(indexed_gpu_metadata_buffer),
                Some(late_indexed_work_item_gpu_buffer),
                Some(late_indexed_indirect_parameters_buffer),
            ) => {
                let late_indexed_work_item_buffer_size = NonZero::<u64>::try_from(
                    late_indexed_work_item_buffer.len() as u64
                        * u64::from(PreprocessWorkItem::min_size()),
                )
                .ok();

                Some(
                    self.render_device.create_bind_group(
                        "preprocess_late_indexed_gpu_occlusion_culling_bind_group",
                        &self
                            .pipelines
                            .late_gpu_occlusion_culling_preprocess
                            .bind_group_layout,
                        &BindGroupEntries::with_indices((
                            (3, self.current_input_buffer.as_entire_binding()),
                            (4, self.previous_input_buffer.as_entire_binding()),
                            (
                                5,
                                BindingResource::Buffer(BufferBinding {
                                    buffer: late_indexed_work_item_gpu_buffer,
                                    offset: 0,
                                    size: late_indexed_work_item_buffer_size,
                                }),
                            ),
                            (6, self.data_buffer.as_entire_binding()),
                            (7, indexed_cpu_metadata_buffer.as_entire_binding()),
                            (8, indexed_gpu_metadata_buffer.as_entire_binding()),
                            (9, mesh_culling_data_buffer.as_entire_binding()),
                            (0, view_uniforms_binding.clone()),
                            (10, &view_depth_pyramid.all_mips),
                            (
                                2,
                                BufferBinding {
                                    buffer: previous_view_buffer,
                                    offset: previous_view_uniform_offset.offset as u64,
                                    size: NonZeroU64::new(size_of::<PreviousViewData>() as u64),
                                },
                            ),
                            (
                                12,
                                BufferBinding {
                                    buffer: late_indexed_indirect_parameters_buffer,
                                    offset: 0,
                                    size: NonZeroU64::new(
                                        late_indexed_indirect_parameters_buffer.size(),
                                    ),
                                },
                            ),
                        )),
                    ),
                )
            }
            _ => None,
        }
    }

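    /// Creates the bind group for the late mesh preprocessing phase, with GPU
    /// occlusion culling, for non-indexed meshes. Returns [`None`] if any of
    /// the required buffers haven't been allocated yet.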
    fn create_indirect_occlusion_culling_late_non_indexed_bind_group(
        &self,
        view_depth_pyramid: &ViewDepthPyramid,
        previous_view_uniform_offset: &PreviousViewUniformOffset,
        late_non_indexed_work_item_buffer: &UninitBufferVec<PreprocessWorkItem>,
    ) -> Option<BindGroup> {
        let mesh_culling_data_buffer = self.mesh_culling_data_buffer.buffer()?;
        let view_uniforms_binding = self.view_uniforms.uniforms.binding()?;
        let previous_view_buffer = self.previous_view_uniforms.uniforms.buffer()?;

        match (
            self.phase_indirect_parameters_buffers
                .non_indexed
                .cpu_metadata_buffer(),
            self.phase_indirect_parameters_buffers
                .non_indexed
                .gpu_metadata_buffer(),
            late_non_indexed_work_item_buffer.buffer(),
            self.late_non_indexed_indirect_parameters_buffer.buffer(),
        ) {
            (
                Some(non_indexed_cpu_metadata_buffer),
                Some(non_indexed_gpu_metadata_buffer),
                Some(late_non_indexed_work_item_gpu_buffer),
                Some(late_non_indexed_indirect_parameters_buffer),
            ) => {
                let late_non_indexed_work_item_buffer_size = NonZero::<u64>::try_from(
                    late_non_indexed_work_item_buffer.len() as u64
                        * u64::from(PreprocessWorkItem::min_size()),
                )
                .ok();

                Some(
                    self.render_device.create_bind_group(
                        "preprocess_late_non_indexed_gpu_occlusion_culling_bind_group",
                        &self
                            .pipelines
                            .late_gpu_occlusion_culling_preprocess
                            .bind_group_layout,
                        &BindGroupEntries::with_indices((
                            (3, self.current_input_buffer.as_entire_binding()),
                            (4, self.previous_input_buffer.as_entire_binding()),
                            (
                                5,
                                BindingResource::Buffer(BufferBinding {
                                    buffer: late_non_indexed_work_item_gpu_buffer,
                                    offset: 0,
                                    size: late_non_indexed_work_item_buffer_size,
                                }),
                            ),
                            (6, self.data_buffer.as_entire_binding()),
                            (7, non_indexed_cpu_metadata_buffer.as_entire_binding()),
                            (8, non_indexed_gpu_metadata_buffer.as_entire_binding()),
                            (9, mesh_culling_data_buffer.as_entire_binding()),
                            (0, view_uniforms_binding.clone()),
                            (10, &view_depth_pyramid.all_mips),
                            (
                                2,
                                BufferBinding {
                                    buffer: previous_view_buffer,
                                    offset: previous_view_uniform_offset.offset as u64,
                                    size: NonZeroU64::new(size_of::<PreviousViewData>() as u64),
                                },
                            ),
                            (
                                12,
                                BufferBinding {
                                    buffer: late_non_indexed_indirect_parameters_buffer,
                                    offset: 0,
                                    size: NonZeroU64::new(
                                        late_non_indexed_indirect_parameters_buffer.size(),
                                    ),
                                },
                            ),
                        )),
                    ),
                )
            }
            _ => None,
        }
    }

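    /// Creates the bind groups for mesh preprocessing when indirect drawing
    /// with GPU frustum culling, but not occlusion culling, is in use.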
    fn create_indirect_frustum_culling_preprocess_bind_groups(
        &self,
        indexed_work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
        non_indexed_work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
    ) -> Option<PhasePreprocessBindGroups> {
        Some(PhasePreprocessBindGroups::IndirectFrustumCulling {
            indexed: self
                .create_indirect_frustum_culling_indexed_bind_group(indexed_work_item_buffer),
            non_indexed: self.create_indirect_frustum_culling_non_indexed_bind_group(
                non_indexed_work_item_buffer,
            ),
        })
    }

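    /// Creates the bind group for GPU frustum culling of indexed meshes.
    /// Returns [`None`] if any of the required buffers haven't been allocated
    /// yet.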
    fn create_indirect_frustum_culling_indexed_bind_group(
        &self,
        indexed_work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
    ) -> Option<BindGroup> {
        let mesh_culling_data_buffer = self.mesh_culling_data_buffer.buffer()?;
        let view_uniforms_binding = self.view_uniforms.uniforms.binding()?;

        match (
            self.phase_indirect_parameters_buffers
                .indexed
                .cpu_metadata_buffer(),
            self.phase_indirect_parameters_buffers
                .indexed
                .gpu_metadata_buffer(),
            indexed_work_item_buffer.buffer(),
        ) {
            (
                Some(indexed_cpu_metadata_buffer),
                Some(indexed_gpu_metadata_buffer),
                Some(indexed_work_item_gpu_buffer),
            ) => {
                let indexed_work_item_buffer_size = NonZero::<u64>::try_from(
                    indexed_work_item_buffer.len() as u64
                        * u64::from(PreprocessWorkItem::min_size()),
                )
                .ok();

                Some(
                    self.render_device.create_bind_group(
                        "preprocess_gpu_indexed_frustum_culling_bind_group",
                        &self
                            .pipelines
                            .gpu_frustum_culling_preprocess
                            .bind_group_layout,
                        &BindGroupEntries::with_indices((
                            (3, self.current_input_buffer.as_entire_binding()),
                            (4, self.previous_input_buffer.as_entire_binding()),
                            (
                                5,
                                BindingResource::Buffer(BufferBinding {
                                    buffer: indexed_work_item_gpu_buffer,
                                    offset: 0,
                                    size: indexed_work_item_buffer_size,
                                }),
                            ),
                            (6, self.data_buffer.as_entire_binding()),
                            (7, indexed_cpu_metadata_buffer.as_entire_binding()),
                            (8, indexed_gpu_metadata_buffer.as_entire_binding()),
                            (9, mesh_culling_data_buffer.as_entire_binding()),
                            (0, view_uniforms_binding.clone()),
                        )),
                    ),
                )
            }
            _ => None,
        }
    }

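    /// Creates the bind group for GPU frustum culling of non-indexed meshes.
    /// Returns [`None`] if any of the required buffers haven't been allocated
    /// yet.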
    fn create_indirect_frustum_culling_non_indexed_bind_group(
        &self,
        non_indexed_work_item_buffer: &RawBufferVec<PreprocessWorkItem>,
    ) -> Option<BindGroup> {
        let mesh_culling_data_buffer = self.mesh_culling_data_buffer.buffer()?;
        let view_uniforms_binding = self.view_uniforms.uniforms.binding()?;

        match (
            self.phase_indirect_parameters_buffers
                .non_indexed
                .cpu_metadata_buffer(),
            self.phase_indirect_parameters_buffers
                .non_indexed
                .gpu_metadata_buffer(),
            non_indexed_work_item_buffer.buffer(),
        ) {
            (
                Some(non_indexed_cpu_metadata_buffer),
                Some(non_indexed_gpu_metadata_buffer),
                Some(non_indexed_work_item_gpu_buffer),
            ) => {
                let non_indexed_work_item_buffer_size = NonZero::<u64>::try_from(
                    non_indexed_work_item_buffer.len() as u64
                        * u64::from(PreprocessWorkItem::min_size()),
                )
                .ok();

                Some(
                    self.render_device.create_bind_group(
                        "preprocess_gpu_non_indexed_frustum_culling_bind_group",
                        &self
                            .pipelines
                            .gpu_frustum_culling_preprocess
                            .bind_group_layout,
                        &BindGroupEntries::with_indices((
                            (3, self.current_input_buffer.as_entire_binding()),
                            (4, self.previous_input_buffer.as_entire_binding()),
                            (
                                5,
                                BindingResource::Buffer(BufferBinding {
                                    buffer: non_indexed_work_item_gpu_buffer,
                                    offset: 0,
                                    size: non_indexed_work_item_buffer_size,
                                }),
                            ),
                            (6, self.data_buffer.as_entire_binding()),
                            (7, non_indexed_cpu_metadata_buffer.as_entire_binding()),
                            (8, non_indexed_gpu_metadata_buffer.as_entire_binding()),
                            (9, mesh_culling_data_buffer.as_entire_binding()),
                            (0, view_uniforms_binding.clone()),
                        )),
                    ),
                )
            }
            _ => None,
        }
    }
}

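/// Creates the bind groups for the shaders that reset indirect batch sets and
/// build indirect draw parameters, one set per phase, and inserts them as the
/// [`BuildIndirectParametersBindGroups`] resource.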
fn create_build_indirect_parameters_bind_groups(
    commands: &mut Commands,
    render_device: &RenderDevice,
    pipelines: &PreprocessPipelines,
    current_input_buffer: &Buffer,
    indirect_parameters_buffers: &IndirectParametersBuffers,
) {
    let mut build_indirect_parameters_bind_groups = BuildIndirectParametersBindGroups::new();

    for (phase_type_id, phase_indirect_parameters_buffer) in indirect_parameters_buffers.iter() {
        build_indirect_parameters_bind_groups.insert(
            *phase_type_id,
            PhaseBuildIndirectParametersBindGroups {
                reset_indexed_indirect_batch_sets: phase_indirect_parameters_buffer
                    .indexed
                    .batch_sets_buffer()
                    .map(|indexed_batch_sets_buffer| {
                        render_device.create_bind_group(
                            "reset_indexed_indirect_batch_sets_bind_group",
                            &pipelines
                                .early_phase
                                .reset_indirect_batch_sets
                                .bind_group_layout,
                            &BindGroupEntries::sequential((
                                indexed_batch_sets_buffer.as_entire_binding(),
                            )),
                        )
                    }),

                reset_non_indexed_indirect_batch_sets: phase_indirect_parameters_buffer
                    .non_indexed
                    .batch_sets_buffer()
                    .map(|non_indexed_batch_sets_buffer| {
                        render_device.create_bind_group(
                            "reset_non_indexed_indirect_batch_sets_bind_group",
                            &pipelines
                                .early_phase
                                .reset_indirect_batch_sets
                                .bind_group_layout,
                            &BindGroupEntries::sequential((
                                non_indexed_batch_sets_buffer.as_entire_binding(),
                            )),
                        )
                    }),

                build_indexed_indirect: match (
                    phase_indirect_parameters_buffer
                        .indexed
                        .cpu_metadata_buffer(),
                    phase_indirect_parameters_buffer
                        .indexed
                        .gpu_metadata_buffer(),
                    phase_indirect_parameters_buffer.indexed.data_buffer(),
                    phase_indirect_parameters_buffer.indexed.batch_sets_buffer(),
                ) {
                    (
                        Some(indexed_indirect_parameters_cpu_metadata_buffer),
                        Some(indexed_indirect_parameters_gpu_metadata_buffer),
                        Some(indexed_indirect_parameters_data_buffer),
                        Some(indexed_batch_sets_buffer),
                    ) => Some(
                        render_device.create_bind_group(
                            "build_indexed_indirect_parameters_bind_group",
                            &pipelines
                                .gpu_frustum_culling_build_indexed_indirect_params
                                .bind_group_layout,
                            &BindGroupEntries::sequential((
                                current_input_buffer.as_entire_binding(),
                                // Bind only the first `batch_count()` entries
                                // of the metadata buffers, as they may be
                                // allocated larger than that.
                                BufferBinding {
                                    buffer: indexed_indirect_parameters_cpu_metadata_buffer,
                                    offset: 0,
                                    size: NonZeroU64::new(
                                        phase_indirect_parameters_buffer.indexed.batch_count()
                                            as u64
                                            * size_of::<IndirectParametersCpuMetadata>() as u64,
                                    ),
                                },
                                BufferBinding {
                                    buffer: indexed_indirect_parameters_gpu_metadata_buffer,
                                    offset: 0,
                                    size: NonZeroU64::new(
                                        phase_indirect_parameters_buffer.indexed.batch_count()
                                            as u64
                                            * size_of::<IndirectParametersGpuMetadata>() as u64,
                                    ),
                                },
                                indexed_batch_sets_buffer.as_entire_binding(),
                                indexed_indirect_parameters_data_buffer.as_entire_binding(),
                            )),
                        ),
                    ),
                    _ => None,
                },

                build_non_indexed_indirect: match (
                    phase_indirect_parameters_buffer
                        .non_indexed
                        .cpu_metadata_buffer(),
                    phase_indirect_parameters_buffer
                        .non_indexed
                        .gpu_metadata_buffer(),
                    phase_indirect_parameters_buffer.non_indexed.data_buffer(),
                    phase_indirect_parameters_buffer
                        .non_indexed
                        .batch_sets_buffer(),
                ) {
                    (
                        Some(non_indexed_indirect_parameters_cpu_metadata_buffer),
                        Some(non_indexed_indirect_parameters_gpu_metadata_buffer),
                        Some(non_indexed_indirect_parameters_data_buffer),
                        Some(non_indexed_batch_sets_buffer),
                    ) => Some(
                        render_device.create_bind_group(
                            "build_non_indexed_indirect_parameters_bind_group",
                            &pipelines
                                .gpu_frustum_culling_build_non_indexed_indirect_params
                                .bind_group_layout,
                            &BindGroupEntries::sequential((
                                current_input_buffer.as_entire_binding(),
                                // As above, bind only `batch_count()` entries
                                // of the metadata buffers.
                                BufferBinding {
                                    buffer: non_indexed_indirect_parameters_cpu_metadata_buffer,
                                    offset: 0,
                                    size: NonZeroU64::new(
                                        phase_indirect_parameters_buffer.non_indexed.batch_count()
                                            as u64
                                            * size_of::<IndirectParametersCpuMetadata>() as u64,
                                    ),
                                },
                                BufferBinding {
                                    buffer: non_indexed_indirect_parameters_gpu_metadata_buffer,
                                    offset: 0,
                                    size: NonZeroU64::new(
                                        phase_indirect_parameters_buffer.non_indexed.batch_count()
                                            as u64
                                            * size_of::<IndirectParametersGpuMetadata>() as u64,
                                    ),
                                },
                                non_indexed_batch_sets_buffer.as_entire_binding(),
                                non_indexed_indirect_parameters_data_buffer.as_entire_binding(),
                            )),
                        ),
                    ),
                    _ => None,
                },
            },
        );
    }

    commands.insert_resource(build_indirect_parameters_bind_groups);
}

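/// A system that writes the [`MeshCullingDataBuffer`] to the GPU.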
pub fn write_mesh_culling_data_buffer(
    render_device: Res<RenderDevice>,
    render_queue: Res<RenderQueue>,
    mut mesh_culling_data_buffer: ResMut<MeshCullingDataBuffer>,
) {
    mesh_culling_data_buffer.write_buffer(&render_device, &render_queue);
}