bevy_pbr/render/mesh.rs

1use core::mem::{self, size_of};
2
3use allocator::MeshAllocator;
4use bevy_asset::{load_internal_asset, AssetId};
5use bevy_core_pipeline::{
6    core_3d::{AlphaMask3d, Opaque3d, Transmissive3d, Transparent3d, CORE_3D_DEPTH_FORMAT},
7    deferred::{AlphaMask3dDeferred, Opaque3dDeferred},
8    oit::{prepare_oit_buffers, OrderIndependentTransparencySettingsOffset},
9    prepass::MotionVectorPrepass,
10};
11use bevy_derive::{Deref, DerefMut};
12use bevy_ecs::{
13    prelude::*,
14    query::ROQueryItem,
15    system::{lifetimeless::*, SystemParamItem, SystemState},
16};
17use bevy_image::{BevyDefault, ImageSampler, TextureFormatPixelInfo};
18use bevy_math::{Affine3, Rect, UVec2, Vec3, Vec4};
19use bevy_render::{
20    batching::{
21        gpu_preprocessing::{
22            self, GpuPreprocessingSupport, IndirectParameters, IndirectParametersBuffer,
23        },
24        no_gpu_preprocessing, GetBatchData, GetFullBatchData, NoAutomaticBatching,
25    },
26    camera::Camera,
27    mesh::*,
28    primitives::Aabb,
29    render_asset::RenderAssets,
30    render_phase::{
31        BinnedRenderPhasePlugin, PhaseItem, RenderCommand, RenderCommandResult,
32        SortedRenderPhasePlugin, TrackedRenderPass,
33    },
34    render_resource::*,
35    renderer::{RenderDevice, RenderQueue},
36    texture::DefaultImageSampler,
37    view::{
38        prepare_view_targets, GpuCulling, RenderVisibilityRanges, ViewTarget, ViewUniformOffset,
39        ViewVisibility, VisibilityRange,
40    },
41    Extract,
42};
43use bevy_transform::components::GlobalTransform;
44use bevy_utils::{
45    tracing::{error, warn},
46    Entry, HashMap, Parallel,
47};
48
49use crate::{
50    render::{
51        morph::{
52            extract_morphs, no_automatic_morph_batching, prepare_morphs, MorphIndices,
53            MorphUniforms,
54        },
55        skin::no_automatic_skin_batching,
56    },
57    *,
58};
59use bevy_render::sync_world::{MainEntity, MainEntityHashMap};
60use bytemuck::{Pod, Zeroable};
61use nonmax::{NonMaxU16, NonMaxU32};
62use smallvec::{smallvec, SmallVec};
63use static_assertions::const_assert_eq;
64
65use self::irradiance_volume::IRRADIANCE_VOLUMES_ARE_USABLE;
66
67/// Provides support for rendering 3D meshes.
68#[derive(Default)]
69pub struct MeshRenderPlugin {
70    /// Whether we're building [`MeshUniform`]s on GPU.
71    ///
    /// This requires compute shader support, so it will be forcibly disabled if
    /// the platform doesn't support compute shaders.
74    pub use_gpu_instance_buffer_builder: bool,
75}
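// Illustrative sketch, not a prescribed setup: `MeshRenderPlugin` is normally added for
// you by the higher-level PBR plugin, but if it were registered manually, the GPU
// instance-buffer builder could be requested like this. Whether it is actually used
// still depends on `GpuPreprocessingSupport`, checked in `finish()` below.
#[allow(dead_code)]
fn example_register_mesh_render_plugin(app: &mut App) {
    app.add_plugins(MeshRenderPlugin {
        use_gpu_instance_buffer_builder: true,
    });
}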
76
77pub const FORWARD_IO_HANDLE: Handle<Shader> = Handle::weak_from_u128(2645551199423808407);
78pub const MESH_VIEW_TYPES_HANDLE: Handle<Shader> = Handle::weak_from_u128(8140454348013264787);
79pub const MESH_VIEW_BINDINGS_HANDLE: Handle<Shader> = Handle::weak_from_u128(9076678235888822571);
80pub const MESH_TYPES_HANDLE: Handle<Shader> = Handle::weak_from_u128(2506024101911992377);
81pub const MESH_BINDINGS_HANDLE: Handle<Shader> = Handle::weak_from_u128(16831548636314682308);
82pub const MESH_FUNCTIONS_HANDLE: Handle<Shader> = Handle::weak_from_u128(6300874327833745635);
83pub const MESH_SHADER_HANDLE: Handle<Shader> = Handle::weak_from_u128(3252377289100772450);
84pub const SKINNING_HANDLE: Handle<Shader> = Handle::weak_from_u128(13215291596265391738);
85pub const MORPH_HANDLE: Handle<Shader> = Handle::weak_from_u128(970982813587607345);
86
87/// How many textures are allowed in the view bind group layout (`@group(0)`) before
88/// broader compatibility with WebGL and WebGPU is at risk, due to the minimum guaranteed
89/// values for `MAX_TEXTURE_IMAGE_UNITS` (in WebGL) and `maxSampledTexturesPerShaderStage` (in WebGPU),
90/// currently both at 16.
91///
92/// We use 10 here because it still leaves us, in a worst case scenario, with 6 textures for the other bind groups.
93///
94/// See: <https://gpuweb.github.io/gpuweb/#limits>
95#[cfg(debug_assertions)]
96pub const MESH_PIPELINE_VIEW_LAYOUT_SAFE_MAX_TEXTURES: usize = 10;
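// A minimal compile-time sanity check (illustrative only), mirroring the reasoning
// above: staying at or below this limit must leave at least 6 of the 16 guaranteed
// sampled textures for the other bind groups.
#[cfg(debug_assertions)]
const _: () = assert!(MESH_PIPELINE_VIEW_LAYOUT_SAFE_MAX_TEXTURES + 6 <= 16);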
97
98impl Plugin for MeshRenderPlugin {
99    fn build(&self, app: &mut App) {
100        load_internal_asset!(app, FORWARD_IO_HANDLE, "forward_io.wgsl", Shader::from_wgsl);
101        load_internal_asset!(
102            app,
103            MESH_VIEW_TYPES_HANDLE,
104            "mesh_view_types.wgsl",
105            Shader::from_wgsl_with_defs,
106            vec![
107                ShaderDefVal::UInt(
108                    "MAX_DIRECTIONAL_LIGHTS".into(),
109                    MAX_DIRECTIONAL_LIGHTS as u32
110                ),
111                ShaderDefVal::UInt(
112                    "MAX_CASCADES_PER_LIGHT".into(),
113                    MAX_CASCADES_PER_LIGHT as u32,
114                )
115            ]
116        );
117        load_internal_asset!(
118            app,
119            MESH_VIEW_BINDINGS_HANDLE,
120            "mesh_view_bindings.wgsl",
121            Shader::from_wgsl
122        );
123        load_internal_asset!(app, MESH_TYPES_HANDLE, "mesh_types.wgsl", Shader::from_wgsl);
124        load_internal_asset!(
125            app,
126            MESH_FUNCTIONS_HANDLE,
127            "mesh_functions.wgsl",
128            Shader::from_wgsl
129        );
130        load_internal_asset!(app, MESH_SHADER_HANDLE, "mesh.wgsl", Shader::from_wgsl);
131        load_internal_asset!(app, SKINNING_HANDLE, "skinning.wgsl", Shader::from_wgsl);
132        load_internal_asset!(app, MORPH_HANDLE, "morph.wgsl", Shader::from_wgsl);
133
134        if app.get_sub_app(RenderApp).is_none() {
135            return;
136        }
137
138        app.add_systems(
139            PostUpdate,
140            (no_automatic_skin_batching, no_automatic_morph_batching),
141        )
142        .add_plugins((
143            BinnedRenderPhasePlugin::<Opaque3d, MeshPipeline>::default(),
144            BinnedRenderPhasePlugin::<AlphaMask3d, MeshPipeline>::default(),
145            BinnedRenderPhasePlugin::<Shadow, MeshPipeline>::default(),
146            BinnedRenderPhasePlugin::<Opaque3dDeferred, MeshPipeline>::default(),
147            BinnedRenderPhasePlugin::<AlphaMask3dDeferred, MeshPipeline>::default(),
148            SortedRenderPhasePlugin::<Transmissive3d, MeshPipeline>::default(),
149            SortedRenderPhasePlugin::<Transparent3d, MeshPipeline>::default(),
150        ));
151
152        if let Some(render_app) = app.get_sub_app_mut(RenderApp) {
153            render_app
154                .init_resource::<MeshBindGroups>()
155                .init_resource::<SkinUniforms>()
156                .init_resource::<SkinIndices>()
157                .init_resource::<MorphUniforms>()
158                .init_resource::<MorphIndices>()
159                .init_resource::<MeshCullingDataBuffer>()
160                .add_systems(
161                    ExtractSchedule,
162                    (
163                        extract_skins,
164                        extract_morphs,
165                        gpu_preprocessing::clear_batched_gpu_instance_buffers::<MeshPipeline>
166                            .before(ExtractMeshesSet),
167                    ),
168                )
169                .add_systems(
170                    Render,
171                    (
172                        set_mesh_motion_vector_flags.in_set(RenderSet::PrepareAssets),
173                        prepare_skins.in_set(RenderSet::PrepareResources),
174                        prepare_morphs.in_set(RenderSet::PrepareResources),
175                        prepare_mesh_bind_group.in_set(RenderSet::PrepareBindGroups),
176                        prepare_mesh_view_bind_groups
177                            .in_set(RenderSet::PrepareBindGroups)
178                            .after(prepare_oit_buffers),
179                        no_gpu_preprocessing::clear_batched_cpu_instance_buffers::<MeshPipeline>
180                            .in_set(RenderSet::Cleanup)
181                            .after(RenderSet::Render),
182                    ),
183                );
184        }
185    }
186
187    fn finish(&self, app: &mut App) {
188        let mut mesh_bindings_shader_defs = Vec::with_capacity(1);
189
190        if let Some(render_app) = app.get_sub_app_mut(RenderApp) {
191            render_app.init_resource::<GpuPreprocessingSupport>();
192
193            let gpu_preprocessing_support =
194                render_app.world().resource::<GpuPreprocessingSupport>();
195            let use_gpu_instance_buffer_builder = self.use_gpu_instance_buffer_builder
196                && *gpu_preprocessing_support != GpuPreprocessingSupport::None;
197
198            let render_mesh_instances = RenderMeshInstances::new(use_gpu_instance_buffer_builder);
199            render_app.insert_resource(render_mesh_instances);
200
201            if use_gpu_instance_buffer_builder {
202                render_app
203                    .init_resource::<gpu_preprocessing::BatchedInstanceBuffers<MeshUniform, MeshInputUniform>>()
204                    .init_resource::<RenderMeshInstanceGpuQueues>()
205                    .add_systems(
206                        ExtractSchedule,
207                        extract_meshes_for_gpu_building.in_set(ExtractMeshesSet),
208                    )
209                    .add_systems(
210                        Render,
211                        (
212                            gpu_preprocessing::write_batched_instance_buffers::<MeshPipeline>
213                                .in_set(RenderSet::PrepareResourcesFlush),
214                            gpu_preprocessing::delete_old_work_item_buffers::<MeshPipeline>
215                                .in_set(RenderSet::ManageViews)
216                                .after(prepare_view_targets),
217                            collect_meshes_for_gpu_building
218                                .in_set(RenderSet::PrepareAssets)
219                                .after(allocator::allocate_and_free_meshes)
220                                // This must be before
221                                // `set_mesh_motion_vector_flags` so it doesn't
222                                // overwrite those flags.
223                                .before(set_mesh_motion_vector_flags),
224                        ),
225                    );
226            } else {
227                let render_device = render_app.world().resource::<RenderDevice>();
228                let cpu_batched_instance_buffer =
229                    no_gpu_preprocessing::BatchedInstanceBuffer::<MeshUniform>::new(render_device);
230                render_app
231                    .insert_resource(cpu_batched_instance_buffer)
232                    .add_systems(
233                        ExtractSchedule,
234                        extract_meshes_for_cpu_building.in_set(ExtractMeshesSet),
235                    )
236                    .add_systems(
237                        Render,
238                        no_gpu_preprocessing::write_batched_instance_buffer::<MeshPipeline>
239                            .in_set(RenderSet::PrepareResourcesFlush),
240                    );
241            };
242
243            let render_device = render_app.world().resource::<RenderDevice>();
244            if let Some(per_object_buffer_batch_size) =
245                GpuArrayBuffer::<MeshUniform>::batch_size(render_device)
246            {
247                mesh_bindings_shader_defs.push(ShaderDefVal::UInt(
248                    "PER_OBJECT_BUFFER_BATCH_SIZE".into(),
249                    per_object_buffer_batch_size,
250                ));
251            }
252
253            render_app
254                .init_resource::<MeshPipelineViewLayouts>()
255                .init_resource::<MeshPipeline>();
256        }
257
258        // Load the mesh_bindings shader module here as it depends on runtime information about
259        // whether storage buffers are supported, or the maximum uniform buffer binding size.
260        load_internal_asset!(
261            app,
262            MESH_BINDINGS_HANDLE,
263            "mesh_bindings.wgsl",
264            Shader::from_wgsl_with_defs,
265            mesh_bindings_shader_defs
266        );
267    }
268}
269
270#[derive(Component)]
271pub struct MeshTransforms {
272    pub world_from_local: Affine3,
273    pub previous_world_from_local: Affine3,
274    pub flags: u32,
275}
276
277#[derive(ShaderType, Clone)]
278pub struct MeshUniform {
279    // Affine 4x3 matrices transposed to 3x4
280    pub world_from_local: [Vec4; 3],
281    pub previous_world_from_local: [Vec4; 3],
282    // 3x3 matrix packed in mat2x4 and f32 as:
283    //   [0].xyz, [1].x,
    //   [1].yz, [2].xy,
285    //   [2].z
286    pub local_from_world_transpose_a: [Vec4; 2],
287    pub local_from_world_transpose_b: f32,
288    pub flags: u32,
289    // Four 16-bit unsigned normalized UV values packed into a `UVec2`:
290    //
291    //                         <--- MSB                   LSB --->
292    //                         +---- min v ----+ +---- min u ----+
293    //     lightmap_uv_rect.x: vvvvvvvv vvvvvvvv uuuuuuuu uuuuuuuu,
294    //                         +---- max v ----+ +---- max u ----+
295    //     lightmap_uv_rect.y: VVVVVVVV VVVVVVVV UUUUUUUU UUUUUUUU,
296    //
297    // (MSB: most significant bit; LSB: least significant bit.)
298    pub lightmap_uv_rect: UVec2,
299    /// The index of this mesh's first vertex in the vertex buffer.
300    ///
301    /// Multiple meshes can be packed into a single vertex buffer (see
302    /// [`MeshAllocator`]). This value stores the offset of the first vertex in
303    /// this mesh in that buffer.
304    pub first_vertex_index: u32,
305    /// Padding.
306    pub pad_a: u32,
307    /// Padding.
308    pub pad_b: u32,
309    /// Padding.
310    pub pad_c: u32,
311}
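// Illustrative sketch of the `lightmap_uv_rect` packing described above (a hypothetical
// standalone helper; the renderer itself builds this value via `pack_lightmap_uv_rect`).
// Each UV coordinate is assumed to already be normalized to [0, 1] and is stored as a
// 16-bit unsigned normalized value.
#[allow(dead_code)]
fn example_pack_uv_rect(rect: Rect) -> UVec2 {
    // Scale to the 16-bit unorm range and round to the nearest representable value.
    let quantized = (Vec4::new(rect.min.x, rect.min.y, rect.max.x, rect.max.y) * 65535.0)
        .round()
        .as_uvec4();
    // x: min v in the high 16 bits, min u in the low 16 bits.
    // y: max v in the high 16 bits, max u in the low 16 bits.
    UVec2::new(
        quantized.x | (quantized.y << 16),
        quantized.z | (quantized.w << 16),
    )
}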
312
313/// Information that has to be transferred from CPU to GPU in order to produce
314/// the full [`MeshUniform`].
315///
316/// This is essentially a subset of the fields in [`MeshUniform`] above.
317#[derive(ShaderType, Pod, Zeroable, Clone, Copy)]
318#[repr(C)]
319pub struct MeshInputUniform {
320    /// Affine 4x3 matrix transposed to 3x4.
321    pub world_from_local: [Vec4; 3],
322    /// Four 16-bit unsigned normalized UV values packed into a `UVec2`:
323    ///
324    /// ```text
325    ///                         <--- MSB                   LSB --->
326    ///                         +---- min v ----+ +---- min u ----+
327    ///     lightmap_uv_rect.x: vvvvvvvv vvvvvvvv uuuuuuuu uuuuuuuu,
328    ///                         +---- max v ----+ +---- max u ----+
329    ///     lightmap_uv_rect.y: VVVVVVVV VVVVVVVV UUUUUUUU UUUUUUUU,
330    ///
331    /// (MSB: most significant bit; LSB: least significant bit.)
332    /// ```
333    pub lightmap_uv_rect: UVec2,
334    /// Various [`MeshFlags`].
335    pub flags: u32,
336    /// The index of this mesh's [`MeshInputUniform`] in the previous frame's
337    /// buffer, if applicable.
338    ///
339    /// This is used for TAA. If not present, this will be `u32::MAX`.
340    pub previous_input_index: u32,
341    /// The index of this mesh's first vertex in the vertex buffer.
342    ///
343    /// Multiple meshes can be packed into a single vertex buffer (see
344    /// [`MeshAllocator`]). This value stores the offset of the first vertex in
345    /// this mesh in that buffer.
346    pub first_vertex_index: u32,
347    /// Padding.
348    pub pad_a: u32,
349    /// Padding.
350    pub pad_b: u32,
351    /// Padding.
352    pub pad_c: u32,
353}
354
355/// Information about each mesh instance needed to cull it on GPU.
356///
357/// This consists of its axis-aligned bounding box (AABB).
358#[derive(ShaderType, Pod, Zeroable, Clone, Copy)]
359#[repr(C)]
360pub struct MeshCullingData {
361    /// The 3D center of the AABB in model space, padded with an extra unused
362    /// float value.
363    pub aabb_center: Vec4,
364    /// The 3D extents of the AABB in model space, divided by two, padded with
365    /// an extra unused float value.
366    pub aabb_half_extents: Vec4,
367}
368
369/// A GPU buffer that holds the information needed to cull meshes on GPU.
370///
371/// At the moment, this simply holds each mesh's AABB.
372///
373/// To avoid wasting CPU time in the CPU culling case, this buffer will be empty
374/// if GPU culling isn't in use.
375#[derive(Resource, Deref, DerefMut)]
376pub struct MeshCullingDataBuffer(RawBufferVec<MeshCullingData>);
377
378impl MeshUniform {
379    pub fn new(
380        mesh_transforms: &MeshTransforms,
381        first_vertex_index: u32,
382        maybe_lightmap_uv_rect: Option<Rect>,
383    ) -> Self {
384        let (local_from_world_transpose_a, local_from_world_transpose_b) =
385            mesh_transforms.world_from_local.inverse_transpose_3x3();
386        Self {
387            world_from_local: mesh_transforms.world_from_local.to_transpose(),
388            previous_world_from_local: mesh_transforms.previous_world_from_local.to_transpose(),
389            lightmap_uv_rect: pack_lightmap_uv_rect(maybe_lightmap_uv_rect),
390            local_from_world_transpose_a,
391            local_from_world_transpose_b,
392            flags: mesh_transforms.flags,
393            first_vertex_index,
394            pad_a: 0,
395            pad_b: 0,
396            pad_c: 0,
397        }
398    }
399}
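// Illustrative sketch (hypothetical usage, not part of the render pipeline): building a
// `MeshUniform` directly from a `GlobalTransform` when there is no previous-frame
// transform, no lightmap, and the mesh's vertices start at offset 0 in the shared
// vertex buffer.
#[allow(dead_code)]
fn example_mesh_uniform_for_transform(transform: &GlobalTransform) -> MeshUniform {
    let world_from_local = transform.affine();
    let transforms = MeshTransforms {
        world_from_local: (&world_from_local).into(),
        // With no previous-frame data, fall back to the current transform.
        previous_world_from_local: (&world_from_local).into(),
        flags: MeshFlags::NONE.bits(),
    };
    MeshUniform::new(&transforms, 0, None)
}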
400
401// NOTE: These must match the bit flags in bevy_pbr/src/render/mesh_types.wgsl!
402bitflags::bitflags! {
403    /// Various flags and tightly-packed values on a mesh.
404    ///
405    /// Flags grow from the top bit down; other values grow from the bottom bit
406    /// up.
407    #[repr(transparent)]
408    pub struct MeshFlags: u32 {
409        /// Bitmask for the 16-bit index into the LOD array.
410        ///
411        /// This will be `u16::MAX` if this mesh has no LOD.
412        const LOD_INDEX_MASK              = (1 << 16) - 1;
413        const SHADOW_RECEIVER             = 1 << 29;
414        const TRANSMITTED_SHADOW_RECEIVER = 1 << 30;
415        // Indicates the sign of the determinant of the 3x3 model matrix. If the sign is positive,
416        // then the flag should be set, else it should not be set.
417        const SIGN_DETERMINANT_MODEL_3X3  = 1 << 31;
418        const NONE                        = 0;
419        const UNINITIALIZED               = 0xFFFFFFFF;
420    }
421}
422
423impl MeshFlags {
424    fn from_components(
425        transform: &GlobalTransform,
426        lod_index: Option<NonMaxU16>,
427        not_shadow_receiver: bool,
428        transmitted_receiver: bool,
429    ) -> MeshFlags {
430        let mut mesh_flags = if not_shadow_receiver {
431            MeshFlags::empty()
432        } else {
433            MeshFlags::SHADOW_RECEIVER
434        };
435        if transmitted_receiver {
436            mesh_flags |= MeshFlags::TRANSMITTED_SHADOW_RECEIVER;
437        }
438        if transform.affine().matrix3.determinant().is_sign_positive() {
439            mesh_flags |= MeshFlags::SIGN_DETERMINANT_MODEL_3X3;
440        }
441
442        let lod_index_bits = match lod_index {
443            None => u16::MAX,
444            Some(lod_index) => u16::from(lod_index),
445        };
446        mesh_flags |=
447            MeshFlags::from_bits_retain((lod_index_bits as u32) << MeshFlags::LOD_INDEX_SHIFT);
448
449        mesh_flags
450    }
451
452    /// The first bit of the LOD index.
453    pub const LOD_INDEX_SHIFT: u32 = 0;
454}
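// Illustrative sketch (hypothetical helper): recovering the LOD index that
// `MeshFlags::from_components` packs into the low 16 bits, treating the `u16::MAX`
// sentinel as "no LOD".
#[allow(dead_code)]
fn example_unpack_lod_index(flags: MeshFlags) -> Option<u16> {
    let bits =
        ((flags.bits() & MeshFlags::LOD_INDEX_MASK.bits()) >> MeshFlags::LOD_INDEX_SHIFT) as u16;
    if bits == u16::MAX {
        None
    } else {
        Some(bits)
    }
}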
455
456bitflags::bitflags! {
457    /// Various useful flags for [`RenderMeshInstance`]s.
458    #[derive(Clone, Copy)]
459    pub struct RenderMeshInstanceFlags: u8 {
460        /// The mesh casts shadows.
461        const SHADOW_CASTER           = 1 << 0;
462        /// The mesh can participate in automatic batching.
463        const AUTOMATIC_BATCHING      = 1 << 1;
464        /// The mesh had a transform last frame and so is eligible for motion
465        /// vector computation.
466        const HAS_PREVIOUS_TRANSFORM  = 1 << 2;
467        /// The mesh had a skin last frame and so that skin should be taken into
468        /// account for motion vector computation.
469        const HAS_PREVIOUS_SKIN       = 1 << 3;
470        /// The mesh had morph targets last frame and so they should be taken
471        /// into account for motion vector computation.
472        const HAS_PREVIOUS_MORPH      = 1 << 4;
473    }
474}
475
476/// CPU data that the render world keeps for each entity, when *not* using GPU
477/// mesh uniform building.
478#[derive(Deref, DerefMut)]
479pub struct RenderMeshInstanceCpu {
480    /// Data shared between both the CPU mesh uniform building and the GPU mesh
481    /// uniform building paths.
482    #[deref]
483    pub shared: RenderMeshInstanceShared,
484    /// The transform of the mesh.
485    ///
486    /// This will be written into the [`MeshUniform`] at the appropriate time.
487    pub transforms: MeshTransforms,
488}
489
490/// CPU data that the render world needs to keep for each entity that contains a
491/// mesh when using GPU mesh uniform building.
492#[derive(Deref, DerefMut)]
493pub struct RenderMeshInstanceGpu {
494    /// Data shared between both the CPU mesh uniform building and the GPU mesh
495    /// uniform building paths.
496    #[deref]
497    pub shared: RenderMeshInstanceShared,
498    /// The translation of the mesh.
499    ///
500    /// This is the only part of the transform that we have to keep on CPU (for
501    /// distance sorting).
502    pub translation: Vec3,
503    /// The index of the [`MeshInputUniform`] in the buffer.
504    pub current_uniform_index: NonMaxU32,
505}
506
507/// CPU data that the render world needs to keep about each entity that contains
508/// a mesh.
509pub struct RenderMeshInstanceShared {
510    /// The [`AssetId`] of the mesh.
511    pub mesh_asset_id: AssetId<Mesh>,
512    /// A slot for the material bind group ID.
513    ///
514    /// This is filled in during [`crate::material::queue_material_meshes`].
515    pub material_bind_group_id: AtomicMaterialBindGroupId,
516    /// Various flags.
517    pub flags: RenderMeshInstanceFlags,
518}
519
520/// Information that is gathered during the parallel portion of mesh extraction
521/// when GPU mesh uniform building is enabled.
522///
523/// From this, the [`MeshInputUniform`] and [`RenderMeshInstanceGpu`] are
524/// prepared.
525pub struct RenderMeshInstanceGpuBuilder {
526    /// Data that will be placed on the [`RenderMeshInstanceGpu`].
527    pub shared: RenderMeshInstanceShared,
528    /// The current transform.
529    pub world_from_local: Affine3,
530    /// Four 16-bit unsigned normalized UV values packed into a [`UVec2`]:
531    ///
532    /// ```text
533    ///                         <--- MSB                   LSB --->
534    ///                         +---- min v ----+ +---- min u ----+
535    ///     lightmap_uv_rect.x: vvvvvvvv vvvvvvvv uuuuuuuu uuuuuuuu,
536    ///                         +---- max v ----+ +---- max u ----+
537    ///     lightmap_uv_rect.y: VVVVVVVV VVVVVVVV UUUUUUUU UUUUUUUU,
538    ///
539    /// (MSB: most significant bit; LSB: least significant bit.)
540    /// ```
541    pub lightmap_uv_rect: UVec2,
542    /// The index of the previous mesh input.
543    pub previous_input_index: Option<NonMaxU32>,
544    /// Various flags.
545    pub mesh_flags: MeshFlags,
546}
547
548/// The per-thread queues used during [`extract_meshes_for_gpu_building`].
549///
550/// There are two varieties of these: one for when culling happens on CPU and
551/// one for when culling happens on GPU. Having the two varieties avoids wasting
552/// space if GPU culling is disabled.
553#[derive(Default)]
554pub enum RenderMeshInstanceGpuQueue {
555    /// The default value.
556    ///
557    /// This becomes [`RenderMeshInstanceGpuQueue::CpuCulling`] or
558    /// [`RenderMeshInstanceGpuQueue::GpuCulling`] once extraction starts.
559    #[default]
560    None,
561    /// The version of [`RenderMeshInstanceGpuQueue`] that omits the
562    /// [`MeshCullingData`], so that we don't waste space when GPU
563    /// culling is disabled.
564    CpuCulling(Vec<(MainEntity, RenderMeshInstanceGpuBuilder)>),
565    /// The version of [`RenderMeshInstanceGpuQueue`] that contains the
566    /// [`MeshCullingData`], used when any view has GPU culling
567    /// enabled.
568    GpuCulling(Vec<(MainEntity, RenderMeshInstanceGpuBuilder, MeshCullingData)>),
569}
570
571/// The per-thread queues containing mesh instances, populated during the
572/// extract phase.
573///
574/// These are filled in [`extract_meshes_for_gpu_building`] and consumed in
575/// [`collect_meshes_for_gpu_building`].
576#[derive(Resource, Default, Deref, DerefMut)]
577pub struct RenderMeshInstanceGpuQueues(Parallel<RenderMeshInstanceGpuQueue>);
578
579impl RenderMeshInstanceShared {
580    fn from_components(
581        previous_transform: Option<&PreviousGlobalTransform>,
582        mesh: &Mesh3d,
583        not_shadow_caster: bool,
584        no_automatic_batching: bool,
585    ) -> Self {
586        let mut mesh_instance_flags = RenderMeshInstanceFlags::empty();
587        mesh_instance_flags.set(RenderMeshInstanceFlags::SHADOW_CASTER, !not_shadow_caster);
588        mesh_instance_flags.set(
589            RenderMeshInstanceFlags::AUTOMATIC_BATCHING,
590            !no_automatic_batching,
591        );
592        mesh_instance_flags.set(
593            RenderMeshInstanceFlags::HAS_PREVIOUS_TRANSFORM,
594            previous_transform.is_some(),
595        );
596
597        RenderMeshInstanceShared {
598            mesh_asset_id: mesh.id(),
599            flags: mesh_instance_flags,
600            material_bind_group_id: AtomicMaterialBindGroupId::default(),
601        }
602    }
603
604    /// Returns true if this entity is eligible to participate in automatic
605    /// batching.
606    #[inline]
607    pub fn should_batch(&self) -> bool {
608        self.flags
609            .contains(RenderMeshInstanceFlags::AUTOMATIC_BATCHING)
610            && self.material_bind_group_id.get().is_some()
611    }
612}
613
614/// Information that the render world keeps about each entity that contains a
615/// mesh.
616///
617/// The set of information needed is different depending on whether CPU or GPU
618/// [`MeshUniform`] building is in use.
619#[derive(Resource)]
620pub enum RenderMeshInstances {
621    /// Information needed when using CPU mesh instance data building.
622    CpuBuilding(RenderMeshInstancesCpu),
623    /// Information needed when using GPU mesh instance data building.
624    GpuBuilding(RenderMeshInstancesGpu),
625}
626
627/// Information that the render world keeps about each entity that contains a
628/// mesh, when using CPU mesh instance data building.
629#[derive(Default, Deref, DerefMut)]
630pub struct RenderMeshInstancesCpu(MainEntityHashMap<RenderMeshInstanceCpu>);
631
632/// Information that the render world keeps about each entity that contains a
633/// mesh, when using GPU mesh instance data building.
634#[derive(Default, Deref, DerefMut)]
635pub struct RenderMeshInstancesGpu(MainEntityHashMap<RenderMeshInstanceGpu>);
636
637impl RenderMeshInstances {
638    /// Creates a new [`RenderMeshInstances`] instance.
639    fn new(use_gpu_instance_buffer_builder: bool) -> RenderMeshInstances {
640        if use_gpu_instance_buffer_builder {
641            RenderMeshInstances::GpuBuilding(RenderMeshInstancesGpu::default())
642        } else {
643            RenderMeshInstances::CpuBuilding(RenderMeshInstancesCpu::default())
644        }
645    }
646
647    /// Returns the ID of the mesh asset attached to the given entity, if any.
648    pub(crate) fn mesh_asset_id(&self, entity: MainEntity) -> Option<AssetId<Mesh>> {
649        match *self {
650            RenderMeshInstances::CpuBuilding(ref instances) => instances.mesh_asset_id(entity),
651            RenderMeshInstances::GpuBuilding(ref instances) => instances.mesh_asset_id(entity),
652        }
653    }
654
655    /// Constructs [`RenderMeshQueueData`] for the given entity, if it has a
656    /// mesh attached.
657    pub fn render_mesh_queue_data(&self, entity: MainEntity) -> Option<RenderMeshQueueData> {
658        match *self {
659            RenderMeshInstances::CpuBuilding(ref instances) => {
660                instances.render_mesh_queue_data(entity)
661            }
662            RenderMeshInstances::GpuBuilding(ref instances) => {
663                instances.render_mesh_queue_data(entity)
664            }
665        }
666    }
667
668    /// Inserts the given flags into the CPU or GPU render mesh instance data
669    /// for the given mesh as appropriate.
670    fn insert_mesh_instance_flags(&mut self, entity: MainEntity, flags: RenderMeshInstanceFlags) {
671        match *self {
672            RenderMeshInstances::CpuBuilding(ref mut instances) => {
673                instances.insert_mesh_instance_flags(entity, flags);
674            }
675            RenderMeshInstances::GpuBuilding(ref mut instances) => {
676                instances.insert_mesh_instance_flags(entity, flags);
677            }
678        }
679    }
680}
681
682impl RenderMeshInstancesCpu {
683    fn mesh_asset_id(&self, entity: MainEntity) -> Option<AssetId<Mesh>> {
684        self.get(&entity)
685            .map(|render_mesh_instance| render_mesh_instance.mesh_asset_id)
686    }
687
688    fn render_mesh_queue_data(&self, entity: MainEntity) -> Option<RenderMeshQueueData> {
689        self.get(&entity)
690            .map(|render_mesh_instance| RenderMeshQueueData {
691                shared: &render_mesh_instance.shared,
692                translation: render_mesh_instance.transforms.world_from_local.translation,
693            })
694    }
695
696    /// Inserts the given flags into the render mesh instance data for the given
697    /// mesh.
698    fn insert_mesh_instance_flags(&mut self, entity: MainEntity, flags: RenderMeshInstanceFlags) {
699        if let Some(instance) = self.get_mut(&entity) {
700            instance.flags.insert(flags);
701        }
702    }
703}
704
705impl RenderMeshInstancesGpu {
706    fn mesh_asset_id(&self, entity: MainEntity) -> Option<AssetId<Mesh>> {
707        self.get(&entity)
708            .map(|render_mesh_instance| render_mesh_instance.mesh_asset_id)
709    }
710
711    fn render_mesh_queue_data(&self, entity: MainEntity) -> Option<RenderMeshQueueData> {
712        self.get(&entity)
713            .map(|render_mesh_instance| RenderMeshQueueData {
714                shared: &render_mesh_instance.shared,
715                translation: render_mesh_instance.translation,
716            })
717    }
718
719    /// Inserts the given flags into the render mesh instance data for the given
720    /// mesh.
721    fn insert_mesh_instance_flags(&mut self, entity: MainEntity, flags: RenderMeshInstanceFlags) {
722        if let Some(instance) = self.get_mut(&entity) {
723            instance.flags.insert(flags);
724        }
725    }
726}
727
728impl RenderMeshInstanceGpuQueue {
729    /// Clears out a [`RenderMeshInstanceGpuQueue`], creating or recreating it
730    /// as necessary.
731    ///
732    /// `any_gpu_culling` should be set to true if any view has GPU culling
733    /// enabled.
734    fn init(&mut self, any_gpu_culling: bool) {
735        match (any_gpu_culling, &mut *self) {
736            (true, RenderMeshInstanceGpuQueue::GpuCulling(queue)) => queue.clear(),
737            (true, _) => *self = RenderMeshInstanceGpuQueue::GpuCulling(vec![]),
738            (false, RenderMeshInstanceGpuQueue::CpuCulling(queue)) => queue.clear(),
739            (false, _) => *self = RenderMeshInstanceGpuQueue::CpuCulling(vec![]),
740        }
741    }
742
743    /// Adds a new mesh to this queue.
744    fn push(
745        &mut self,
746        entity: MainEntity,
747        instance_builder: RenderMeshInstanceGpuBuilder,
748        culling_data_builder: Option<MeshCullingData>,
749    ) {
750        match (&mut *self, culling_data_builder) {
751            (&mut RenderMeshInstanceGpuQueue::CpuCulling(ref mut queue), None) => {
752                queue.push((entity, instance_builder));
753            }
754            (
755                &mut RenderMeshInstanceGpuQueue::GpuCulling(ref mut queue),
756                Some(culling_data_builder),
757            ) => {
758                queue.push((entity, instance_builder, culling_data_builder));
759            }
760            (_, None) => {
761                *self = RenderMeshInstanceGpuQueue::CpuCulling(vec![(entity, instance_builder)]);
762            }
763            (_, Some(culling_data_builder)) => {
764                *self = RenderMeshInstanceGpuQueue::GpuCulling(vec![(
765                    entity,
766                    instance_builder,
767                    culling_data_builder,
768                )]);
769            }
770        }
771    }
772}
773
774impl RenderMeshInstanceGpuBuilder {
775    /// Flushes this mesh instance to the [`RenderMeshInstanceGpu`] and
776    /// [`MeshInputUniform`] tables.
777    fn add_to(
778        self,
779        entity: MainEntity,
780        render_mesh_instances: &mut MainEntityHashMap<RenderMeshInstanceGpu>,
781        current_input_buffer: &mut RawBufferVec<MeshInputUniform>,
782        mesh_allocator: &MeshAllocator,
783    ) -> usize {
784        let first_vertex_index = match mesh_allocator.mesh_vertex_slice(&self.shared.mesh_asset_id)
785        {
786            Some(mesh_vertex_slice) => mesh_vertex_slice.range.start,
787            None => 0,
788        };
789
790        // Push the mesh input uniform.
791        let current_uniform_index = current_input_buffer.push(MeshInputUniform {
792            world_from_local: self.world_from_local.to_transpose(),
793            lightmap_uv_rect: self.lightmap_uv_rect,
794            flags: self.mesh_flags.bits(),
795            previous_input_index: match self.previous_input_index {
796                Some(previous_input_index) => previous_input_index.into(),
797                None => u32::MAX,
798            },
799            first_vertex_index,
800            pad_a: 0,
801            pad_b: 0,
802            pad_c: 0,
803        });
804
805        // Record the [`RenderMeshInstance`].
806        render_mesh_instances.insert(
807            entity,
808            RenderMeshInstanceGpu {
809                translation: self.world_from_local.translation,
810                shared: self.shared,
811                current_uniform_index: (current_uniform_index as u32)
812                    .try_into()
813                    .unwrap_or_default(),
814            },
815        );
816
817        current_uniform_index
818    }
819}
820
821impl MeshCullingData {
822    /// Returns a new [`MeshCullingData`] initialized with the given AABB.
823    ///
824    /// If no AABB is provided, an infinitely-large one is conservatively
825    /// chosen.
826    fn new(aabb: Option<&Aabb>) -> Self {
827        match aabb {
828            Some(aabb) => MeshCullingData {
829                aabb_center: aabb.center.extend(0.0),
830                aabb_half_extents: aabb.half_extents.extend(0.0),
831            },
832            None => MeshCullingData {
833                aabb_center: Vec3::ZERO.extend(0.0),
834                aabb_half_extents: Vec3::INFINITY.extend(0.0),
835            },
836        }
837    }
838
839    /// Flushes this mesh instance culling data to the
840    /// [`MeshCullingDataBuffer`].
841    fn add_to(&self, mesh_culling_data_buffer: &mut MeshCullingDataBuffer) -> usize {
842        mesh_culling_data_buffer.push(*self)
843    }
844}
845
846impl Default for MeshCullingDataBuffer {
847    #[inline]
848    fn default() -> Self {
849        Self(RawBufferVec::new(BufferUsages::STORAGE))
850    }
851}
852
853/// Data that [`crate::material::queue_material_meshes`] and similar systems
854/// need in order to place entities that contain meshes in the right batch.
855#[derive(Deref)]
856pub struct RenderMeshQueueData<'a> {
857    /// General information about the mesh instance.
858    #[deref]
859    pub shared: &'a RenderMeshInstanceShared,
860    /// The translation of the mesh instance.
861    pub translation: Vec3,
862}
863
864/// A [`SystemSet`] that encompasses both [`extract_meshes_for_cpu_building`]
865/// and [`extract_meshes_for_gpu_building`].
866#[derive(SystemSet, Clone, PartialEq, Eq, Debug, Hash)]
867pub struct ExtractMeshesSet;
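// A minimal sketch (hypothetical system): extraction logic that must run after the
// built-in mesh extraction can be ordered against this set, e.g.
// `render_app.add_systems(ExtractSchedule, inspect_extracted_meshes.after(ExtractMeshesSet))`.
#[allow(dead_code)]
fn inspect_extracted_meshes(render_mesh_instances: Res<RenderMeshInstances>) {
    // In CPU-building mode the instances are already populated during extraction; in
    // GPU-building mode they are filled in later by `collect_meshes_for_gpu_building`.
    if let RenderMeshInstances::CpuBuilding(ref instances) = *render_mesh_instances {
        let _extracted_mesh_count = instances.len();
    }
}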
868
869/// Extracts meshes from the main world into the render world, populating the
870/// [`RenderMeshInstances`].
871///
872/// This is the variant of the system that runs when we're *not* using GPU
873/// [`MeshUniform`] building.
874pub fn extract_meshes_for_cpu_building(
875    mut render_mesh_instances: ResMut<RenderMeshInstances>,
876    render_visibility_ranges: Res<RenderVisibilityRanges>,
877    mut render_mesh_instance_queues: Local<Parallel<Vec<(Entity, RenderMeshInstanceCpu)>>>,
878    meshes_query: Extract<
879        Query<(
880            Entity,
881            &ViewVisibility,
882            &GlobalTransform,
883            Option<&PreviousGlobalTransform>,
884            &Mesh3d,
885            Has<NotShadowReceiver>,
886            Has<TransmittedShadowReceiver>,
887            Has<NotShadowCaster>,
888            Has<NoAutomaticBatching>,
889            Has<VisibilityRange>,
890        )>,
891    >,
892) {
893    meshes_query.par_iter().for_each_init(
894        || render_mesh_instance_queues.borrow_local_mut(),
895        |queue,
896         (
897            entity,
898            view_visibility,
899            transform,
900            previous_transform,
901            mesh,
902            not_shadow_receiver,
903            transmitted_receiver,
904            not_shadow_caster,
905            no_automatic_batching,
906            visibility_range,
907        )| {
908            if !view_visibility.get() {
909                return;
910            }
911
912            let mut lod_index = None;
913            if visibility_range {
914                lod_index = render_visibility_ranges.lod_index_for_entity(entity.into());
915            }
916
917            let mesh_flags = MeshFlags::from_components(
918                transform,
919                lod_index,
920                not_shadow_receiver,
921                transmitted_receiver,
922            );
923
924            let shared = RenderMeshInstanceShared::from_components(
925                previous_transform,
926                mesh,
927                not_shadow_caster,
928                no_automatic_batching,
929            );
930
931            let world_from_local = transform.affine();
932            queue.push((
933                entity,
934                RenderMeshInstanceCpu {
935                    transforms: MeshTransforms {
936                        world_from_local: (&world_from_local).into(),
937                        previous_world_from_local: (&previous_transform
938                            .map(|t| t.0)
939                            .unwrap_or(world_from_local))
940                            .into(),
941                        flags: mesh_flags.bits(),
942                    },
943                    shared,
944                },
945            ));
946        },
947    );
948
949    // Collect the render mesh instances.
950    let RenderMeshInstances::CpuBuilding(ref mut render_mesh_instances) = *render_mesh_instances
951    else {
952        panic!(
953            "`extract_meshes_for_cpu_building` should only be called if we're using CPU \
954            `MeshUniform` building"
955        );
956    };
957
958    render_mesh_instances.clear();
959    for queue in render_mesh_instance_queues.iter_mut() {
960        for (entity, render_mesh_instance) in queue.drain(..) {
961            render_mesh_instances.insert_unique_unchecked(entity.into(), render_mesh_instance);
962        }
963    }
964}
965
966/// Extracts meshes from the main world into the render world and queues
967/// [`MeshInputUniform`]s to be uploaded to the GPU.
968///
969/// This is the variant of the system that runs when we're using GPU
970/// [`MeshUniform`] building.
971pub fn extract_meshes_for_gpu_building(
972    mut render_mesh_instances: ResMut<RenderMeshInstances>,
973    render_visibility_ranges: Res<RenderVisibilityRanges>,
974    mut render_mesh_instance_queues: ResMut<RenderMeshInstanceGpuQueues>,
975    meshes_query: Extract<
976        Query<(
977            Entity,
978            &ViewVisibility,
979            &GlobalTransform,
980            Option<&PreviousGlobalTransform>,
981            Option<&Lightmap>,
982            Option<&Aabb>,
983            &Mesh3d,
984            Has<NotShadowReceiver>,
985            Has<TransmittedShadowReceiver>,
986            Has<NotShadowCaster>,
987            Has<NoAutomaticBatching>,
988            Has<VisibilityRange>,
989        )>,
990    >,
991    cameras_query: Extract<Query<(), (With<Camera>, With<GpuCulling>)>>,
992) {
993    let any_gpu_culling = !cameras_query.is_empty();
994    for render_mesh_instance_queue in render_mesh_instance_queues.iter_mut() {
995        render_mesh_instance_queue.init(any_gpu_culling);
996    }
997
998    // Collect render mesh instances. Build up the uniform buffer.
999    let RenderMeshInstances::GpuBuilding(ref mut render_mesh_instances) = *render_mesh_instances
1000    else {
1001        panic!(
1002            "`extract_meshes_for_gpu_building` should only be called if we're \
1003            using GPU `MeshUniform` building"
1004        );
1005    };
1006
1007    meshes_query.par_iter().for_each_init(
1008        || render_mesh_instance_queues.borrow_local_mut(),
1009        |queue,
1010         (
1011            entity,
1012            view_visibility,
1013            transform,
1014            previous_transform,
1015            lightmap,
1016            aabb,
1017            mesh,
1018            not_shadow_receiver,
1019            transmitted_receiver,
1020            not_shadow_caster,
1021            no_automatic_batching,
1022            visibility_range,
1023        )| {
1024            if !view_visibility.get() {
1025                return;
1026            }
1027
1028            let mut lod_index = None;
1029            if visibility_range {
1030                lod_index = render_visibility_ranges.lod_index_for_entity(entity.into());
1031            }
1032
1033            let mesh_flags = MeshFlags::from_components(
1034                transform,
1035                lod_index,
1036                not_shadow_receiver,
1037                transmitted_receiver,
1038            );
1039
1040            let shared = RenderMeshInstanceShared::from_components(
1041                previous_transform,
1042                mesh,
1043                not_shadow_caster,
1044                no_automatic_batching,
1045            );
1046
1047            let lightmap_uv_rect = pack_lightmap_uv_rect(lightmap.map(|lightmap| lightmap.uv_rect));
1048
1049            let gpu_mesh_culling_data = any_gpu_culling.then(|| MeshCullingData::new(aabb));
1050
1051            let previous_input_index = if shared
1052                .flags
1053                .contains(RenderMeshInstanceFlags::HAS_PREVIOUS_TRANSFORM)
1054            {
1055                render_mesh_instances
1056                    .get(&MainEntity::from(entity))
1057                    .map(|render_mesh_instance| render_mesh_instance.current_uniform_index)
1058            } else {
1059                None
1060            };
1061
1062            let gpu_mesh_instance_builder = RenderMeshInstanceGpuBuilder {
1063                shared,
1064                world_from_local: (&transform.affine()).into(),
1065                lightmap_uv_rect,
1066                mesh_flags,
1067                previous_input_index,
1068            };
1069
1070            queue.push(
1071                entity.into(),
1072                gpu_mesh_instance_builder,
1073                gpu_mesh_culling_data,
1074            );
1075        },
1076    );
1077}
1078
1079/// A system that sets the [`RenderMeshInstanceFlags`] for each mesh based on
1080/// whether the previous frame had skins and/or morph targets.
1081///
1082/// Ordinarily, [`RenderMeshInstanceFlags`] are set during the extraction phase.
1083/// However, we can't do that for the flags related to skins and morph targets
1084/// because the previous frame's skin and morph targets are the responsibility
1085/// of [`extract_skins`] and [`extract_morphs`] respectively. We want to run
1086/// those systems in parallel with mesh extraction for performance, so we need
1087/// to defer setting of these mesh instance flags to after extraction, which
1088/// this system does. An alternative to having skin- and morph-target-related
1089/// data in [`RenderMeshInstanceFlags`] would be to have
1090/// [`crate::material::queue_material_meshes`] check the skin and morph target
1091/// tables for each mesh, but that would be too slow in the hot mesh queuing
1092/// loop.
1093fn set_mesh_motion_vector_flags(
1094    mut render_mesh_instances: ResMut<RenderMeshInstances>,
1095    skin_indices: Res<SkinIndices>,
1096    morph_indices: Res<MorphIndices>,
1097) {
1098    for &entity in skin_indices.prev.keys() {
1099        render_mesh_instances
1100            .insert_mesh_instance_flags(entity, RenderMeshInstanceFlags::HAS_PREVIOUS_SKIN);
1101    }
1102    for &entity in morph_indices.prev.keys() {
1103        render_mesh_instances
1104            .insert_mesh_instance_flags(entity, RenderMeshInstanceFlags::HAS_PREVIOUS_MORPH);
1105    }
1106}
1107
1108/// Creates the [`RenderMeshInstanceGpu`]s and [`MeshInputUniform`]s when GPU
1109/// mesh uniforms are built.
1110pub fn collect_meshes_for_gpu_building(
1111    render_mesh_instances: ResMut<RenderMeshInstances>,
1112    batched_instance_buffers: ResMut<
1113        gpu_preprocessing::BatchedInstanceBuffers<MeshUniform, MeshInputUniform>,
1114    >,
1115    mut mesh_culling_data_buffer: ResMut<MeshCullingDataBuffer>,
1116    mut render_mesh_instance_queues: ResMut<RenderMeshInstanceGpuQueues>,
1117    mesh_allocator: Res<MeshAllocator>,
1118) {
1119    let RenderMeshInstances::GpuBuilding(ref mut render_mesh_instances) =
1120        render_mesh_instances.into_inner()
1121    else {
1122        return;
1123    };
1124
1125    // Collect render mesh instances. Build up the uniform buffer.
1126
1127    let gpu_preprocessing::BatchedInstanceBuffers {
1128        ref mut current_input_buffer,
1129        ref mut previous_input_buffer,
1130        ..
1131    } = batched_instance_buffers.into_inner();
1132
1133    // Swap buffers.
1134    mem::swap(current_input_buffer, previous_input_buffer);
1135
1136    // Build the [`RenderMeshInstance`]s and [`MeshInputUniform`]s.
1137    render_mesh_instances.clear();
1138
1139    for queue in render_mesh_instance_queues.iter_mut() {
1140        match *queue {
1141            RenderMeshInstanceGpuQueue::None => {
1142                // This can only happen if the queue is empty.
1143            }
1144            RenderMeshInstanceGpuQueue::CpuCulling(ref mut queue) => {
1145                for (entity, mesh_instance_builder) in queue.drain(..) {
1146                    mesh_instance_builder.add_to(
1147                        entity,
1148                        &mut *render_mesh_instances,
1149                        current_input_buffer,
1150                        &mesh_allocator,
1151                    );
1152                }
1153            }
1154            RenderMeshInstanceGpuQueue::GpuCulling(ref mut queue) => {
1155                for (entity, mesh_instance_builder, mesh_culling_builder) in queue.drain(..) {
1156                    let instance_data_index = mesh_instance_builder.add_to(
1157                        entity,
1158                        &mut *render_mesh_instances,
1159                        current_input_buffer,
1160                        &mesh_allocator,
1161                    );
1162                    let culling_data_index =
1163                        mesh_culling_builder.add_to(&mut mesh_culling_data_buffer);
1164                    debug_assert_eq!(instance_data_index, culling_data_index);
1165                }
1166            }
1167        }
1168    }
1169}
1170
1171/// All data needed to construct a pipeline for rendering 3D meshes.
1172#[derive(Resource, Clone)]
1173pub struct MeshPipeline {
1174    /// A reference to all the mesh pipeline view layouts.
1175    pub view_layouts: MeshPipelineViewLayouts,
1176    // This dummy white texture is to be used in place of optional StandardMaterial textures
1177    pub dummy_white_gpu_image: GpuImage,
1178    pub clustered_forward_buffer_binding_type: BufferBindingType,
1179    pub mesh_layouts: MeshLayouts,
1180    /// `MeshUniform`s are stored in arrays in buffers. If storage buffers are available, they
1181    /// are used and this will be `None`, otherwise uniform buffers will be used with batches
1182    /// of this many `MeshUniform`s, stored at dynamic offsets within the uniform buffer.
1183    /// Use code like this in custom shaders:
1184    /// ```wgsl
1185    /// ##ifdef PER_OBJECT_BUFFER_BATCH_SIZE
1186    /// @group(1) @binding(0) var<uniform> mesh: array<Mesh, #{PER_OBJECT_BUFFER_BATCH_SIZE}u>;
1187    /// ##else
1188    /// @group(1) @binding(0) var<storage> mesh: array<Mesh>;
1189    /// ##endif // PER_OBJECT_BUFFER_BATCH_SIZE
1190    /// ```
1191    pub per_object_buffer_batch_size: Option<u32>,
1192
1193    /// Whether binding arrays (a.k.a. bindless textures) are usable on the
1194    /// current render device.
1195    ///
1196    /// This affects whether reflection probes can be used.
1197    pub binding_arrays_are_usable: bool,
1198}
1199
1200impl FromWorld for MeshPipeline {
1201    fn from_world(world: &mut World) -> Self {
1202        let mut system_state: SystemState<(
1203            Res<RenderDevice>,
1204            Res<DefaultImageSampler>,
1205            Res<RenderQueue>,
1206            Res<MeshPipelineViewLayouts>,
1207        )> = SystemState::new(world);
1208        let (render_device, default_sampler, render_queue, view_layouts) =
1209            system_state.get_mut(world);
1210
1211        let clustered_forward_buffer_binding_type = render_device
1212            .get_supported_read_only_binding_type(CLUSTERED_FORWARD_STORAGE_BUFFER_COUNT);
1213
        // A 1x1x1 'all 1.0' texture used as a dummy in place of optional StandardMaterial textures.
1215        let dummy_white_gpu_image = {
1216            let image = Image::default();
1217            let texture = render_device.create_texture(&image.texture_descriptor);
1218            let sampler = match image.sampler {
1219                ImageSampler::Default => (**default_sampler).clone(),
1220                ImageSampler::Descriptor(ref descriptor) => {
1221                    render_device.create_sampler(&descriptor.as_wgpu())
1222                }
1223            };
1224
1225            let format_size = image.texture_descriptor.format.pixel_size();
1226            render_queue.write_texture(
1227                texture.as_image_copy(),
1228                &image.data,
1229                ImageDataLayout {
1230                    offset: 0,
1231                    bytes_per_row: Some(image.width() * format_size as u32),
1232                    rows_per_image: None,
1233                },
1234                image.texture_descriptor.size,
1235            );
1236
1237            let texture_view = texture.create_view(&TextureViewDescriptor::default());
1238            GpuImage {
1239                texture,
1240                texture_view,
1241                texture_format: image.texture_descriptor.format,
1242                sampler,
1243                size: image.size(),
1244                mip_level_count: image.texture_descriptor.mip_level_count,
1245            }
1246        };
1247
1248        MeshPipeline {
1249            view_layouts: view_layouts.clone(),
1250            clustered_forward_buffer_binding_type,
1251            dummy_white_gpu_image,
1252            mesh_layouts: MeshLayouts::new(&render_device),
1253            per_object_buffer_batch_size: GpuArrayBuffer::<MeshUniform>::batch_size(&render_device),
1254            binding_arrays_are_usable: binding_arrays_are_usable(&render_device),
1255        }
1256    }
1257}
1258
1259impl MeshPipeline {
1260    pub fn get_image_texture<'a>(
1261        &'a self,
1262        gpu_images: &'a RenderAssets<GpuImage>,
1263        handle_option: &Option<Handle<Image>>,
1264    ) -> Option<(&'a TextureView, &'a Sampler)> {
1265        if let Some(handle) = handle_option {
1266            let gpu_image = gpu_images.get(handle)?;
1267            Some((&gpu_image.texture_view, &gpu_image.sampler))
1268        } else {
1269            Some((
1270                &self.dummy_white_gpu_image.texture_view,
1271                &self.dummy_white_gpu_image.sampler,
1272            ))
1273        }
1274    }
1275
1276    pub fn get_view_layout(&self, layout_key: MeshPipelineViewLayoutKey) -> &BindGroupLayout {
1277        self.view_layouts.get_view_layout(layout_key)
1278    }
1279}
1280
1281impl GetBatchData for MeshPipeline {
1282    type Param = (
1283        SRes<RenderMeshInstances>,
1284        SRes<RenderLightmaps>,
1285        SRes<RenderAssets<RenderMesh>>,
1286        SRes<MeshAllocator>,
1287    );
1288    // The material bind group ID, the mesh ID, and the lightmap ID,
1289    // respectively.
1290    type CompareData = (MaterialBindGroupId, AssetId<Mesh>, Option<AssetId<Image>>);
1291
1292    type BufferData = MeshUniform;
1293
1294    fn get_batch_data(
1295        (mesh_instances, lightmaps, _, mesh_allocator): &SystemParamItem<Self::Param>,
1296        (_entity, main_entity): (Entity, MainEntity),
1297    ) -> Option<(Self::BufferData, Option<Self::CompareData>)> {
1298        let RenderMeshInstances::CpuBuilding(ref mesh_instances) = **mesh_instances else {
1299            error!(
1300                "`get_batch_data` should never be called in GPU mesh uniform \
1301                building mode"
1302            );
1303            return None;
1304        };
1305        let mesh_instance = mesh_instances.get(&main_entity)?;
1306        let first_vertex_index =
1307            match mesh_allocator.mesh_vertex_slice(&mesh_instance.mesh_asset_id) {
1308                Some(mesh_vertex_slice) => mesh_vertex_slice.range.start,
1309                None => 0,
1310            };
1311        let maybe_lightmap = lightmaps.render_lightmaps.get(&main_entity);
1312
1313        Some((
1314            MeshUniform::new(
1315                &mesh_instance.transforms,
1316                first_vertex_index,
1317                maybe_lightmap.map(|lightmap| lightmap.uv_rect),
1318            ),
1319            mesh_instance.should_batch().then_some((
1320                mesh_instance.material_bind_group_id.get(),
1321                mesh_instance.mesh_asset_id,
1322                maybe_lightmap.map(|lightmap| lightmap.image),
1323            )),
1324        ))
1325    }
1326}
1327
1328impl GetFullBatchData for MeshPipeline {
1329    type BufferInputData = MeshInputUniform;
1330
1331    fn get_index_and_compare_data(
1332        (mesh_instances, lightmaps, _, _): &SystemParamItem<Self::Param>,
1333        (_entity, main_entity): (Entity, MainEntity),
1334    ) -> Option<(NonMaxU32, Option<Self::CompareData>)> {
1335        // This should only be called during GPU building.
1336        let RenderMeshInstances::GpuBuilding(ref mesh_instances) = **mesh_instances else {
1337            error!(
1338                "`get_index_and_compare_data` should never be called in CPU mesh uniform building \
1339                mode"
1340            );
1341            return None;
1342        };
1343
1344        let mesh_instance = mesh_instances.get(&main_entity)?;
1345        let maybe_lightmap = lightmaps.render_lightmaps.get(&main_entity);
1346
1347        Some((
1348            mesh_instance.current_uniform_index,
1349            mesh_instance.should_batch().then_some((
1350                mesh_instance.material_bind_group_id.get(),
1351                mesh_instance.mesh_asset_id,
1352                maybe_lightmap.map(|lightmap| lightmap.image),
1353            )),
1354        ))
1355    }
1356
1357    fn get_binned_batch_data(
1358        (mesh_instances, lightmaps, _, mesh_allocator): &SystemParamItem<Self::Param>,
1359        (_entity, main_entity): (Entity, MainEntity),
1360    ) -> Option<Self::BufferData> {
1361        let RenderMeshInstances::CpuBuilding(ref mesh_instances) = **mesh_instances else {
1362            error!(
1363                "`get_binned_batch_data` should never be called in GPU mesh uniform building mode"
1364            );
1365            return None;
1366        };
1367        let mesh_instance = mesh_instances.get(&main_entity)?;
1368        let first_vertex_index =
1369            match mesh_allocator.mesh_vertex_slice(&mesh_instance.mesh_asset_id) {
1370                Some(mesh_vertex_slice) => mesh_vertex_slice.range.start,
1371                None => 0,
1372            };
1373        let maybe_lightmap = lightmaps.render_lightmaps.get(&main_entity);
1374
1375        Some(MeshUniform::new(
1376            &mesh_instance.transforms,
1377            first_vertex_index,
1378            maybe_lightmap.map(|lightmap| lightmap.uv_rect),
1379        ))
1380    }
1381
1382    fn get_binned_index(
1383        (mesh_instances, _, _, _): &SystemParamItem<Self::Param>,
1384        (_entity, main_entity): (Entity, MainEntity),
1385    ) -> Option<NonMaxU32> {
1386        // This should only be called during GPU building.
1387        let RenderMeshInstances::GpuBuilding(ref mesh_instances) = **mesh_instances else {
1388            error!(
1389                "`get_binned_index` should never be called in CPU mesh uniform \
1390                building mode"
1391            );
1392            return None;
1393        };
1394
1395        mesh_instances
1396            .get(&main_entity)
1397            .map(|entity| entity.current_uniform_index)
1398    }
1399
1400    fn get_batch_indirect_parameters_index(
1401        (mesh_instances, _, meshes, mesh_allocator): &SystemParamItem<Self::Param>,
1402        indirect_parameters_buffer: &mut IndirectParametersBuffer,
1403        entity: (Entity, MainEntity),
1404        instance_index: u32,
1405    ) -> Option<NonMaxU32> {
1406        get_batch_indirect_parameters_index(
1407            mesh_instances,
1408            meshes,
1409            mesh_allocator,
1410            indirect_parameters_buffer,
1411            entity,
1412            instance_index,
1413        )
1414    }
1415}
1416
1417/// Pushes a set of [`IndirectParameters`] onto the [`IndirectParametersBuffer`]
1418/// for the given mesh instance, and returns the index of those indirect
1419/// parameters.
1420fn get_batch_indirect_parameters_index(
1421    mesh_instances: &RenderMeshInstances,
1422    meshes: &RenderAssets<RenderMesh>,
1423    mesh_allocator: &MeshAllocator,
1424    indirect_parameters_buffer: &mut IndirectParametersBuffer,
1425    (_entity, main_entity): (Entity, MainEntity),
1426    instance_index: u32,
1427) -> Option<NonMaxU32> {
1428    // This should only be called during GPU building.
1429    let RenderMeshInstances::GpuBuilding(ref mesh_instances) = *mesh_instances else {
1430        error!(
1431            "`get_batch_indirect_parameters_index` should never be called in CPU mesh uniform \
1432                building mode"
1433        );
1434        return None;
1435    };
1436
1437    let mesh_instance = mesh_instances.get(&main_entity)?;
1438    let mesh = meshes.get(mesh_instance.mesh_asset_id)?;
1439    let vertex_buffer_slice = mesh_allocator.mesh_vertex_slice(&mesh_instance.mesh_asset_id)?;
1440
1441    // Note that `IndirectParameters` covers both of these structures, even
1442    // though they actually have distinct layouts. See the comment above that
1443    // type for more information.
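    //
    // As a rough reference for how the fields below are interpreted by the GPU:
    //
    //   indexed draws:      vertex_or_index_count         → index count
    //                       first_vertex_or_first_index   → first index
    //                       base_vertex_or_first_instance → base vertex
    //   non-indexed draws:  vertex_or_index_count         → vertex count
    //                       first_vertex_or_first_index   → first vertex
    //                       base_vertex_or_first_instance → first instance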
1444    let indirect_parameters = match mesh.buffer_info {
1445        RenderMeshBufferInfo::Indexed {
1446            count: index_count, ..
1447        } => {
1448            let index_buffer_slice =
1449                mesh_allocator.mesh_index_slice(&mesh_instance.mesh_asset_id)?;
1450            IndirectParameters {
1451                vertex_or_index_count: index_count,
1452                instance_count: 0,
1453                first_vertex_or_first_index: index_buffer_slice.range.start,
1454                base_vertex_or_first_instance: vertex_buffer_slice.range.start,
1455                first_instance: instance_index,
1456            }
1457        }
1458        RenderMeshBufferInfo::NonIndexed => IndirectParameters {
1459            vertex_or_index_count: mesh.vertex_count,
1460            instance_count: 0,
1461            first_vertex_or_first_index: vertex_buffer_slice.range.start,
1462            base_vertex_or_first_instance: instance_index,
1463            first_instance: instance_index,
1464        },
1465    };
1466
1467    (indirect_parameters_buffer.push(indirect_parameters) as u32)
1468        .try_into()
1469        .ok()
1470}
1471
1472bitflags::bitflags! {
1473    #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
1474    #[repr(transparent)]
1475    // NOTE: Apparently Quadro drivers support up to 64x MSAA.
1476    /// MSAA is stored as log2(sample count) in a 3-bit field, supporting up to 128x MSAA.
1477    pub struct MeshPipelineKey: u64 {
1478        // Nothing
1479        const NONE                              = 0;
1480
1481        // Inherited bits
1482        const MORPH_TARGETS                     = BaseMeshPipelineKey::MORPH_TARGETS.bits();
1483
1484        // Flag bits
1485        const HDR                               = 1 << 0;
1486        const TONEMAP_IN_SHADER                 = 1 << 1;
1487        const DEBAND_DITHER                     = 1 << 2;
1488        const DEPTH_PREPASS                     = 1 << 3;
1489        const NORMAL_PREPASS                    = 1 << 4;
1490        const DEFERRED_PREPASS                  = 1 << 5;
1491        const MOTION_VECTOR_PREPASS             = 1 << 6;
1492        const MAY_DISCARD                       = 1 << 7; // Guards shader codepaths that may discard, allowing early depth tests in most cases
1493                                                            // See: https://www.khronos.org/opengl/wiki/Early_Fragment_Test
1494        const ENVIRONMENT_MAP                   = 1 << 8;
1495        const SCREEN_SPACE_AMBIENT_OCCLUSION    = 1 << 9;
1496        const DEPTH_CLAMP_ORTHO                 = 1 << 10;
1497        const TEMPORAL_JITTER                   = 1 << 11;
1498        const READS_VIEW_TRANSMISSION_TEXTURE   = 1 << 12;
1499        const LIGHTMAPPED                       = 1 << 13;
1500        const IRRADIANCE_VOLUME                 = 1 << 14;
1501        const VISIBILITY_RANGE_DITHER           = 1 << 15;
1502        const SCREEN_SPACE_REFLECTIONS          = 1 << 16;
1503        const HAS_PREVIOUS_SKIN                 = 1 << 17;
1504        const HAS_PREVIOUS_MORPH                = 1 << 18;
1505        const OIT_ENABLED                       = 1 << 19;
1506        const LAST_FLAG                         = Self::OIT_ENABLED.bits();
1507
1508        // Bitfields
1509        const MSAA_RESERVED_BITS                = Self::MSAA_MASK_BITS << Self::MSAA_SHIFT_BITS;
1510        const BLEND_RESERVED_BITS               = Self::BLEND_MASK_BITS << Self::BLEND_SHIFT_BITS; // ← Bitmask reserving bits for the blend state
1511        const BLEND_OPAQUE                      = 0 << Self::BLEND_SHIFT_BITS;                     // ← Values are just sequential within the mask
1512        const BLEND_PREMULTIPLIED_ALPHA         = 1 << Self::BLEND_SHIFT_BITS;                     // ← As the blend state is stored in 3 bits, it can range from 0 to 7
1513        const BLEND_MULTIPLY                    = 2 << Self::BLEND_SHIFT_BITS;                     // ← See `BLEND_MASK_BITS` for the number of bits available
1514        const BLEND_ALPHA                       = 3 << Self::BLEND_SHIFT_BITS;                     //
1515        const BLEND_ALPHA_TO_COVERAGE           = 4 << Self::BLEND_SHIFT_BITS;                     // ← We still have room for three more values without adding more bits
1516        const TONEMAP_METHOD_RESERVED_BITS      = Self::TONEMAP_METHOD_MASK_BITS << Self::TONEMAP_METHOD_SHIFT_BITS;
1517        const TONEMAP_METHOD_NONE               = 0 << Self::TONEMAP_METHOD_SHIFT_BITS;
1518        const TONEMAP_METHOD_REINHARD           = 1 << Self::TONEMAP_METHOD_SHIFT_BITS;
1519        const TONEMAP_METHOD_REINHARD_LUMINANCE = 2 << Self::TONEMAP_METHOD_SHIFT_BITS;
1520        const TONEMAP_METHOD_ACES_FITTED        = 3 << Self::TONEMAP_METHOD_SHIFT_BITS;
1521        const TONEMAP_METHOD_AGX                = 4 << Self::TONEMAP_METHOD_SHIFT_BITS;
1522        const TONEMAP_METHOD_SOMEWHAT_BORING_DISPLAY_TRANSFORM = 5 << Self::TONEMAP_METHOD_SHIFT_BITS;
1523        const TONEMAP_METHOD_TONY_MC_MAPFACE    = 6 << Self::TONEMAP_METHOD_SHIFT_BITS;
1524        const TONEMAP_METHOD_BLENDER_FILMIC     = 7 << Self::TONEMAP_METHOD_SHIFT_BITS;
1525        const SHADOW_FILTER_METHOD_RESERVED_BITS = Self::SHADOW_FILTER_METHOD_MASK_BITS << Self::SHADOW_FILTER_METHOD_SHIFT_BITS;
1526        const SHADOW_FILTER_METHOD_HARDWARE_2X2  = 0 << Self::SHADOW_FILTER_METHOD_SHIFT_BITS;
1527        const SHADOW_FILTER_METHOD_GAUSSIAN      = 1 << Self::SHADOW_FILTER_METHOD_SHIFT_BITS;
1528        const SHADOW_FILTER_METHOD_TEMPORAL      = 2 << Self::SHADOW_FILTER_METHOD_SHIFT_BITS;
1529        const VIEW_PROJECTION_RESERVED_BITS     = Self::VIEW_PROJECTION_MASK_BITS << Self::VIEW_PROJECTION_SHIFT_BITS;
1530        const VIEW_PROJECTION_NONSTANDARD       = 0 << Self::VIEW_PROJECTION_SHIFT_BITS;
1531        const VIEW_PROJECTION_PERSPECTIVE       = 1 << Self::VIEW_PROJECTION_SHIFT_BITS;
1532        const VIEW_PROJECTION_ORTHOGRAPHIC      = 2 << Self::VIEW_PROJECTION_SHIFT_BITS;
1533        const VIEW_PROJECTION_RESERVED          = 3 << Self::VIEW_PROJECTION_SHIFT_BITS;
1534        const SCREEN_SPACE_SPECULAR_TRANSMISSION_RESERVED_BITS = Self::SCREEN_SPACE_SPECULAR_TRANSMISSION_MASK_BITS << Self::SCREEN_SPACE_SPECULAR_TRANSMISSION_SHIFT_BITS;
1535        const SCREEN_SPACE_SPECULAR_TRANSMISSION_LOW    = 0 << Self::SCREEN_SPACE_SPECULAR_TRANSMISSION_SHIFT_BITS;
1536        const SCREEN_SPACE_SPECULAR_TRANSMISSION_MEDIUM = 1 << Self::SCREEN_SPACE_SPECULAR_TRANSMISSION_SHIFT_BITS;
1537        const SCREEN_SPACE_SPECULAR_TRANSMISSION_HIGH   = 2 << Self::SCREEN_SPACE_SPECULAR_TRANSMISSION_SHIFT_BITS;
1538        const SCREEN_SPACE_SPECULAR_TRANSMISSION_ULTRA  = 3 << Self::SCREEN_SPACE_SPECULAR_TRANSMISSION_SHIFT_BITS;
1539        const ALL_RESERVED_BITS =
1540            Self::BLEND_RESERVED_BITS.bits() |
1541            Self::MSAA_RESERVED_BITS.bits() |
1542            Self::TONEMAP_METHOD_RESERVED_BITS.bits() |
1543            Self::SHADOW_FILTER_METHOD_RESERVED_BITS.bits() |
1544            Self::VIEW_PROJECTION_RESERVED_BITS.bits() |
1545            Self::SCREEN_SPACE_SPECULAR_TRANSMISSION_RESERVED_BITS.bits();
1546    }
1547}
1548
1549impl MeshPipelineKey {
1550    const MSAA_MASK_BITS: u64 = 0b111;
1551    const MSAA_SHIFT_BITS: u64 = Self::LAST_FLAG.bits().trailing_zeros() as u64 + 1;
1552
1553    const BLEND_MASK_BITS: u64 = 0b111;
1554    const BLEND_SHIFT_BITS: u64 = Self::MSAA_MASK_BITS.count_ones() as u64 + Self::MSAA_SHIFT_BITS;
1555
1556    const TONEMAP_METHOD_MASK_BITS: u64 = 0b111;
1557    const TONEMAP_METHOD_SHIFT_BITS: u64 =
1558        Self::BLEND_MASK_BITS.count_ones() as u64 + Self::BLEND_SHIFT_BITS;
1559
1560    const SHADOW_FILTER_METHOD_MASK_BITS: u64 = 0b11;
1561    const SHADOW_FILTER_METHOD_SHIFT_BITS: u64 =
1562        Self::TONEMAP_METHOD_MASK_BITS.count_ones() as u64 + Self::TONEMAP_METHOD_SHIFT_BITS;
1563
1564    const VIEW_PROJECTION_MASK_BITS: u64 = 0b11;
1565    const VIEW_PROJECTION_SHIFT_BITS: u64 = Self::SHADOW_FILTER_METHOD_MASK_BITS.count_ones()
1566        as u64
1567        + Self::SHADOW_FILTER_METHOD_SHIFT_BITS;
1568
1569    const SCREEN_SPACE_SPECULAR_TRANSMISSION_MASK_BITS: u64 = 0b11;
1570    const SCREEN_SPACE_SPECULAR_TRANSMISSION_SHIFT_BITS: u64 =
1571        Self::VIEW_PROJECTION_MASK_BITS.count_ones() as u64 + Self::VIEW_PROJECTION_SHIFT_BITS;
1572
1573    pub fn from_msaa_samples(msaa_samples: u32) -> Self {
1574        let msaa_bits =
1575            (msaa_samples.trailing_zeros() as u64 & Self::MSAA_MASK_BITS) << Self::MSAA_SHIFT_BITS;
1576        Self::from_bits_retain(msaa_bits)
1577    }
1578
1579    pub fn from_hdr(hdr: bool) -> Self {
1580        if hdr {
1581            MeshPipelineKey::HDR
1582        } else {
1583            MeshPipelineKey::NONE
1584        }
1585    }
1586
1587    pub fn msaa_samples(&self) -> u32 {
1588        1 << ((self.bits() >> Self::MSAA_SHIFT_BITS) & Self::MSAA_MASK_BITS)
1589    }
1590
1591    pub fn from_primitive_topology(primitive_topology: PrimitiveTopology) -> Self {
1592        let primitive_topology_bits = ((primitive_topology as u64)
1593            & BaseMeshPipelineKey::PRIMITIVE_TOPOLOGY_MASK_BITS)
1594            << BaseMeshPipelineKey::PRIMITIVE_TOPOLOGY_SHIFT_BITS;
1595        Self::from_bits_retain(primitive_topology_bits)
1596    }
1597
1598    pub fn primitive_topology(&self) -> PrimitiveTopology {
1599        let primitive_topology_bits = (self.bits()
1600            >> BaseMeshPipelineKey::PRIMITIVE_TOPOLOGY_SHIFT_BITS)
1601            & BaseMeshPipelineKey::PRIMITIVE_TOPOLOGY_MASK_BITS;
1602        match primitive_topology_bits {
1603            x if x == PrimitiveTopology::PointList as u64 => PrimitiveTopology::PointList,
1604            x if x == PrimitiveTopology::LineList as u64 => PrimitiveTopology::LineList,
1605            x if x == PrimitiveTopology::LineStrip as u64 => PrimitiveTopology::LineStrip,
1606            x if x == PrimitiveTopology::TriangleList as u64 => PrimitiveTopology::TriangleList,
1607            x if x == PrimitiveTopology::TriangleStrip as u64 => PrimitiveTopology::TriangleStrip,
1608            _ => PrimitiveTopology::default(),
1609        }
1610    }
1611}
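
// A minimal sketch (not used by the renderer itself) of how a pipeline key is
// typically composed and queried; the exact set of flags depends on the view
// and the mesh:
//
//     let key = MeshPipelineKey::from_msaa_samples(4)
//         | MeshPipelineKey::from_hdr(true)
//         | MeshPipelineKey::TONEMAP_IN_SHADER
//         | MeshPipelineKey::BLEND_ALPHA;
//     assert_eq!(key.msaa_samples(), 4);
//     assert!(key.contains(MeshPipelineKey::HDR));
//     assert_eq!(
//         key.intersection(MeshPipelineKey::BLEND_RESERVED_BITS),
//         MeshPipelineKey::BLEND_ALPHA
//     );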
1612
1613// Ensure that `MeshPipelineKey`'s flag and reserved bits don't collide with the bits used by `BaseMeshPipelineKey` (i.e. that we haven't run out of bits).
1614const_assert_eq!(
1615    (((MeshPipelineKey::LAST_FLAG.bits() << 1) - 1) | MeshPipelineKey::ALL_RESERVED_BITS.bits())
1616        & BaseMeshPipelineKey::all().bits(),
1617    0
1618);
1619
1620// Ensure that the reserved bits don't overlap with the topology bits
1621const_assert_eq!(
1622    (BaseMeshPipelineKey::PRIMITIVE_TOPOLOGY_MASK_BITS
1623        << BaseMeshPipelineKey::PRIMITIVE_TOPOLOGY_SHIFT_BITS)
1624        & MeshPipelineKey::ALL_RESERVED_BITS.bits(),
1625    0
1626);
1627
1628fn is_skinned(layout: &MeshVertexBufferLayoutRef) -> bool {
1629    layout.0.contains(Mesh::ATTRIBUTE_JOINT_INDEX)
1630        && layout.0.contains(Mesh::ATTRIBUTE_JOINT_WEIGHT)
1631}
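
/// Chooses the mesh bind group layout that matches the given vertex buffer
/// layout and pipeline key, pushing the corresponding shader defs (`SKINNED`,
/// `MORPH_TARGETS`) and the joint index/weight vertex attributes (at shader
/// locations `offset` and `offset + 1`) as a side effect.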
1632pub fn setup_morph_and_skinning_defs(
1633    mesh_layouts: &MeshLayouts,
1634    layout: &MeshVertexBufferLayoutRef,
1635    offset: u32,
1636    key: &MeshPipelineKey,
1637    shader_defs: &mut Vec<ShaderDefVal>,
1638    vertex_attributes: &mut Vec<VertexAttributeDescriptor>,
1639) -> BindGroupLayout {
1640    let mut add_skin_data = || {
1641        shader_defs.push("SKINNED".into());
1642        vertex_attributes.push(Mesh::ATTRIBUTE_JOINT_INDEX.at_shader_location(offset));
1643        vertex_attributes.push(Mesh::ATTRIBUTE_JOINT_WEIGHT.at_shader_location(offset + 1));
1644    };
1645    let is_morphed = key.intersects(MeshPipelineKey::MORPH_TARGETS);
1646    let is_lightmapped = key.intersects(MeshPipelineKey::LIGHTMAPPED);
1647    let motion_vector_prepass = key.intersects(MeshPipelineKey::MOTION_VECTOR_PREPASS);
1648    match (
1649        is_skinned(layout),
1650        is_morphed,
1651        is_lightmapped,
1652        motion_vector_prepass,
1653    ) {
1654        (true, false, _, true) => {
1655            add_skin_data();
1656            mesh_layouts.skinned_motion.clone()
1657        }
1658        (true, false, _, false) => {
1659            add_skin_data();
1660            mesh_layouts.skinned.clone()
1661        }
1662        (true, true, _, true) => {
1663            add_skin_data();
1664            shader_defs.push("MORPH_TARGETS".into());
1665            mesh_layouts.morphed_skinned_motion.clone()
1666        }
1667        (true, true, _, false) => {
1668            add_skin_data();
1669            shader_defs.push("MORPH_TARGETS".into());
1670            mesh_layouts.morphed_skinned.clone()
1671        }
1672        (false, true, _, true) => {
1673            shader_defs.push("MORPH_TARGETS".into());
1674            mesh_layouts.morphed_motion.clone()
1675        }
1676        (false, true, _, false) => {
1677            shader_defs.push("MORPH_TARGETS".into());
1678            mesh_layouts.morphed.clone()
1679        }
1680        (false, false, true, _) => mesh_layouts.lightmapped.clone(),
1681        (false, false, false, _) => mesh_layouts.model_only.clone(),
1682    }
1683}
1684
1685impl SpecializedMeshPipeline for MeshPipeline {
1686    type Key = MeshPipelineKey;
1687
1688    fn specialize(
1689        &self,
1690        key: Self::Key,
1691        layout: &MeshVertexBufferLayoutRef,
1692    ) -> Result<RenderPipelineDescriptor, SpecializedMeshPipelineError> {
1693        let mut shader_defs = Vec::new();
1694        let mut vertex_attributes = Vec::new();
1695
1696        // Let the shader code know that it's running in a mesh pipeline.
1697        shader_defs.push("MESH_PIPELINE".into());
1698
1699        shader_defs.push("VERTEX_OUTPUT_INSTANCE_INDEX".into());
1700
1701        if layout.0.contains(Mesh::ATTRIBUTE_POSITION) {
1702            shader_defs.push("VERTEX_POSITIONS".into());
1703            vertex_attributes.push(Mesh::ATTRIBUTE_POSITION.at_shader_location(0));
1704        }
1705
1706        if layout.0.contains(Mesh::ATTRIBUTE_NORMAL) {
1707            shader_defs.push("VERTEX_NORMALS".into());
1708            vertex_attributes.push(Mesh::ATTRIBUTE_NORMAL.at_shader_location(1));
1709        }
1710
1711        if layout.0.contains(Mesh::ATTRIBUTE_UV_0) {
1712            shader_defs.push("VERTEX_UVS".into());
1713            shader_defs.push("VERTEX_UVS_A".into());
1714            vertex_attributes.push(Mesh::ATTRIBUTE_UV_0.at_shader_location(2));
1715        }
1716
1717        if layout.0.contains(Mesh::ATTRIBUTE_UV_1) {
1718            shader_defs.push("VERTEX_UVS".into());
1719            shader_defs.push("VERTEX_UVS_B".into());
1720            vertex_attributes.push(Mesh::ATTRIBUTE_UV_1.at_shader_location(3));
1721        }
1722
1723        if layout.0.contains(Mesh::ATTRIBUTE_TANGENT) {
1724            shader_defs.push("VERTEX_TANGENTS".into());
1725            vertex_attributes.push(Mesh::ATTRIBUTE_TANGENT.at_shader_location(4));
1726        }
1727
1728        if layout.0.contains(Mesh::ATTRIBUTE_COLOR) {
1729            shader_defs.push("VERTEX_COLORS".into());
1730            vertex_attributes.push(Mesh::ATTRIBUTE_COLOR.at_shader_location(5));
1731        }
1732
1733        if cfg!(feature = "pbr_transmission_textures") {
1734            shader_defs.push("PBR_TRANSMISSION_TEXTURES_SUPPORTED".into());
1735        }
1736        if cfg!(feature = "pbr_multi_layer_material_textures") {
1737            shader_defs.push("PBR_MULTI_LAYER_MATERIAL_TEXTURES_SUPPORTED".into());
1738        }
1739        if cfg!(feature = "pbr_anisotropy_texture") {
1740            shader_defs.push("PBR_ANISOTROPY_TEXTURE_SUPPORTED".into());
1741        }
1742
1743        let mut bind_group_layout = vec![self.get_view_layout(key.into()).clone()];
1744
1745        if key.msaa_samples() > 1 {
1746            shader_defs.push("MULTISAMPLED".into());
1747        };
1748
1749        bind_group_layout.push(setup_morph_and_skinning_defs(
1750            &self.mesh_layouts,
1751            layout,
1752            6,
1753            &key,
1754            &mut shader_defs,
1755            &mut vertex_attributes,
1756        ));
1757
1758        if key.contains(MeshPipelineKey::SCREEN_SPACE_AMBIENT_OCCLUSION) {
1759            shader_defs.push("SCREEN_SPACE_AMBIENT_OCCLUSION".into());
1760        }
1761
1762        let vertex_buffer_layout = layout.0.get_layout(&vertex_attributes)?;
1763
1764        let (label, blend, depth_write_enabled);
1765        let pass = key.intersection(MeshPipelineKey::BLEND_RESERVED_BITS);
1766        let (mut is_opaque, mut alpha_to_coverage_enabled) = (false, false);
1767        if key.contains(MeshPipelineKey::OIT_ENABLED) && pass == MeshPipelineKey::BLEND_ALPHA {
1768            label = "oit_mesh_pipeline".into();
1769            // TODO tail blending would need alpha blending
1770            blend = None;
1771            shader_defs.push("OIT_ENABLED".into());
1772            // TODO it should be possible to use this to combine MSAA and OIT
1773            // alpha_to_coverage_enabled = true;
1774            depth_write_enabled = false;
1775        } else if pass == MeshPipelineKey::BLEND_ALPHA {
1776            label = "alpha_blend_mesh_pipeline".into();
1777            blend = Some(BlendState::ALPHA_BLENDING);
1778            // For the transparent pass, fragments that are closer will be alpha blended
1779            // but their depth is not written to the depth buffer
1780            depth_write_enabled = false;
1781        } else if pass == MeshPipelineKey::BLEND_PREMULTIPLIED_ALPHA {
1782            label = "premultiplied_alpha_mesh_pipeline".into();
1783            blend = Some(BlendState::PREMULTIPLIED_ALPHA_BLENDING);
1784            shader_defs.push("PREMULTIPLY_ALPHA".into());
1785            shader_defs.push("BLEND_PREMULTIPLIED_ALPHA".into());
1786            // For the transparent pass, fragments that are closer will be alpha blended
1787            // but their depth is not written to the depth buffer
1788            depth_write_enabled = false;
1789        } else if pass == MeshPipelineKey::BLEND_MULTIPLY {
1790            label = "multiply_mesh_pipeline".into();
1791            blend = Some(BlendState {
1792                color: BlendComponent {
1793                    src_factor: BlendFactor::Dst,
1794                    dst_factor: BlendFactor::OneMinusSrcAlpha,
1795                    operation: BlendOperation::Add,
1796                },
1797                alpha: BlendComponent::OVER,
1798            });
1799            shader_defs.push("PREMULTIPLY_ALPHA".into());
1800            shader_defs.push("BLEND_MULTIPLY".into());
1801            // For the multiply pass, fragments that are closer will be alpha blended
1802            // but their depth is not written to the depth buffer
1803            depth_write_enabled = false;
1804        } else if pass == MeshPipelineKey::BLEND_ALPHA_TO_COVERAGE {
1805            label = "alpha_to_coverage_mesh_pipeline".into();
1806            // BlendState::REPLACE is not needed here, and None can potentially be much faster in some cases
1807            blend = None;
1808            // For the opaque and alpha mask passes, fragments that are closer will replace
1809            // the current fragment value in the output and the depth is written to the
1810            // depth buffer
1811            depth_write_enabled = true;
1812            is_opaque = !key.contains(MeshPipelineKey::READS_VIEW_TRANSMISSION_TEXTURE);
1813            alpha_to_coverage_enabled = true;
1814            shader_defs.push("ALPHA_TO_COVERAGE".into());
1815        } else {
1816            label = "opaque_mesh_pipeline".into();
1817            // BlendState::REPLACE is not needed here, and None can potentially be much faster in some cases
1818            blend = None;
1819            // For the opaque and alpha mask passes, fragments that are closer will replace
1820            // the current fragment value in the output and the depth is written to the
1821            // depth buffer
1822            depth_write_enabled = true;
1823            is_opaque = !key.contains(MeshPipelineKey::READS_VIEW_TRANSMISSION_TEXTURE);
1824        }
1825
1826        if key.contains(MeshPipelineKey::NORMAL_PREPASS) {
1827            shader_defs.push("NORMAL_PREPASS".into());
1828        }
1829
1830        if key.contains(MeshPipelineKey::DEPTH_PREPASS) {
1831            shader_defs.push("DEPTH_PREPASS".into());
1832        }
1833
1834        if key.contains(MeshPipelineKey::MOTION_VECTOR_PREPASS) {
1835            shader_defs.push("MOTION_VECTOR_PREPASS".into());
1836        }
1837
1838        if key.contains(MeshPipelineKey::HAS_PREVIOUS_SKIN) {
1839            shader_defs.push("HAS_PREVIOUS_SKIN".into());
1840        }
1841
1842        if key.contains(MeshPipelineKey::HAS_PREVIOUS_MORPH) {
1843            shader_defs.push("HAS_PREVIOUS_MORPH".into());
1844        }
1845
1846        if key.contains(MeshPipelineKey::DEFERRED_PREPASS) {
1847            shader_defs.push("DEFERRED_PREPASS".into());
1848        }
1849
1850        if key.contains(MeshPipelineKey::NORMAL_PREPASS) && key.msaa_samples() == 1 && is_opaque {
1851            shader_defs.push("LOAD_PREPASS_NORMALS".into());
1852        }
1853
1854        let view_projection = key.intersection(MeshPipelineKey::VIEW_PROJECTION_RESERVED_BITS);
1855        if view_projection == MeshPipelineKey::VIEW_PROJECTION_NONSTANDARD {
1856            shader_defs.push("VIEW_PROJECTION_NONSTANDARD".into());
1857        } else if view_projection == MeshPipelineKey::VIEW_PROJECTION_PERSPECTIVE {
1858            shader_defs.push("VIEW_PROJECTION_PERSPECTIVE".into());
1859        } else if view_projection == MeshPipelineKey::VIEW_PROJECTION_ORTHOGRAPHIC {
1860            shader_defs.push("VIEW_PROJECTION_ORTHOGRAPHIC".into());
1861        }
1862
1863        #[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))]
1864        shader_defs.push("WEBGL2".into());
1865
1866        #[cfg(feature = "experimental_pbr_pcss")]
1867        shader_defs.push("PCSS_SAMPLERS_AVAILABLE".into());
1868
1869        if key.contains(MeshPipelineKey::TONEMAP_IN_SHADER) {
1870            shader_defs.push("TONEMAP_IN_SHADER".into());
1871            shader_defs.push(ShaderDefVal::UInt(
1872                "TONEMAPPING_LUT_TEXTURE_BINDING_INDEX".into(),
1873                TONEMAPPING_LUT_TEXTURE_BINDING_INDEX,
1874            ));
1875            shader_defs.push(ShaderDefVal::UInt(
1876                "TONEMAPPING_LUT_SAMPLER_BINDING_INDEX".into(),
1877                TONEMAPPING_LUT_SAMPLER_BINDING_INDEX,
1878            ));
1879
1880            let method = key.intersection(MeshPipelineKey::TONEMAP_METHOD_RESERVED_BITS);
1881
1882            if method == MeshPipelineKey::TONEMAP_METHOD_NONE {
1883                shader_defs.push("TONEMAP_METHOD_NONE".into());
1884            } else if method == MeshPipelineKey::TONEMAP_METHOD_REINHARD {
1885                shader_defs.push("TONEMAP_METHOD_REINHARD".into());
1886            } else if method == MeshPipelineKey::TONEMAP_METHOD_REINHARD_LUMINANCE {
1887                shader_defs.push("TONEMAP_METHOD_REINHARD_LUMINANCE".into());
1888            } else if method == MeshPipelineKey::TONEMAP_METHOD_ACES_FITTED {
1889                shader_defs.push("TONEMAP_METHOD_ACES_FITTED".into());
1890            } else if method == MeshPipelineKey::TONEMAP_METHOD_AGX {
1891                shader_defs.push("TONEMAP_METHOD_AGX".into());
1892            } else if method == MeshPipelineKey::TONEMAP_METHOD_SOMEWHAT_BORING_DISPLAY_TRANSFORM {
1893                shader_defs.push("TONEMAP_METHOD_SOMEWHAT_BORING_DISPLAY_TRANSFORM".into());
1894            } else if method == MeshPipelineKey::TONEMAP_METHOD_BLENDER_FILMIC {
1895                shader_defs.push("TONEMAP_METHOD_BLENDER_FILMIC".into());
1896            } else if method == MeshPipelineKey::TONEMAP_METHOD_TONY_MC_MAPFACE {
1897                shader_defs.push("TONEMAP_METHOD_TONY_MC_MAPFACE".into());
1898            }
1899
1900            // Debanding is tied to tonemapping in the shader and cannot run without it.
1901            if key.contains(MeshPipelineKey::DEBAND_DITHER) {
1902                shader_defs.push("DEBAND_DITHER".into());
1903            }
1904        }
1905
1906        if key.contains(MeshPipelineKey::MAY_DISCARD) {
1907            shader_defs.push("MAY_DISCARD".into());
1908        }
1909
1910        if key.contains(MeshPipelineKey::ENVIRONMENT_MAP) {
1911            shader_defs.push("ENVIRONMENT_MAP".into());
1912        }
1913
1914        if key.contains(MeshPipelineKey::IRRADIANCE_VOLUME) && IRRADIANCE_VOLUMES_ARE_USABLE {
1915            shader_defs.push("IRRADIANCE_VOLUME".into());
1916        }
1917
1918        if key.contains(MeshPipelineKey::LIGHTMAPPED) {
1919            shader_defs.push("LIGHTMAP".into());
1920        }
1921
1922        if key.contains(MeshPipelineKey::TEMPORAL_JITTER) {
1923            shader_defs.push("TEMPORAL_JITTER".into());
1924        }
1925
1926        let shadow_filter_method =
1927            key.intersection(MeshPipelineKey::SHADOW_FILTER_METHOD_RESERVED_BITS);
1928        if shadow_filter_method == MeshPipelineKey::SHADOW_FILTER_METHOD_HARDWARE_2X2 {
1929            shader_defs.push("SHADOW_FILTER_METHOD_HARDWARE_2X2".into());
1930        } else if shadow_filter_method == MeshPipelineKey::SHADOW_FILTER_METHOD_GAUSSIAN {
1931            shader_defs.push("SHADOW_FILTER_METHOD_GAUSSIAN".into());
1932        } else if shadow_filter_method == MeshPipelineKey::SHADOW_FILTER_METHOD_TEMPORAL {
1933            shader_defs.push("SHADOW_FILTER_METHOD_TEMPORAL".into());
1934        }
1935
1936        let blur_quality =
1937            key.intersection(MeshPipelineKey::SCREEN_SPACE_SPECULAR_TRANSMISSION_RESERVED_BITS);
1938
1939        shader_defs.push(ShaderDefVal::Int(
1940            "SCREEN_SPACE_SPECULAR_TRANSMISSION_BLUR_TAPS".into(),
1941            match blur_quality {
1942                MeshPipelineKey::SCREEN_SPACE_SPECULAR_TRANSMISSION_LOW => 4,
1943                MeshPipelineKey::SCREEN_SPACE_SPECULAR_TRANSMISSION_MEDIUM => 8,
1944                MeshPipelineKey::SCREEN_SPACE_SPECULAR_TRANSMISSION_HIGH => 16,
1945                MeshPipelineKey::SCREEN_SPACE_SPECULAR_TRANSMISSION_ULTRA => 32,
1946                _ => unreachable!(), // Not possible, since the mask is 2 bits, and we've covered all 4 cases
1947            },
1948        ));
1949
1950        if key.contains(MeshPipelineKey::VISIBILITY_RANGE_DITHER) {
1951            shader_defs.push("VISIBILITY_RANGE_DITHER".into());
1952        }
1953
1954        if self.binding_arrays_are_usable {
1955            shader_defs.push("MULTIPLE_LIGHT_PROBES_IN_ARRAY".into());
1956        }
1957
1958        if IRRADIANCE_VOLUMES_ARE_USABLE {
1959            shader_defs.push("IRRADIANCE_VOLUMES_ARE_USABLE".into());
1960        }
1961
1962        let format = if key.contains(MeshPipelineKey::HDR) {
1963            ViewTarget::TEXTURE_FORMAT_HDR
1964        } else {
1965            TextureFormat::bevy_default()
1966        };
1967
1968        // This is defined here so that custom shaders that use something other than
1969        // the mesh binding from bevy_pbr::mesh_bindings can easily make use of this
1970        // in their own shaders.
1971        if let Some(per_object_buffer_batch_size) = self.per_object_buffer_batch_size {
1972            shader_defs.push(ShaderDefVal::UInt(
1973                "PER_OBJECT_BUFFER_BATCH_SIZE".into(),
1974                per_object_buffer_batch_size,
1975            ));
1976        }
1977
1978        Ok(RenderPipelineDescriptor {
1979            vertex: VertexState {
1980                shader: MESH_SHADER_HANDLE,
1981                entry_point: "vertex".into(),
1982                shader_defs: shader_defs.clone(),
1983                buffers: vec![vertex_buffer_layout],
1984            },
1985            fragment: Some(FragmentState {
1986                shader: MESH_SHADER_HANDLE,
1987                shader_defs,
1988                entry_point: "fragment".into(),
1989                targets: vec![Some(ColorTargetState {
1990                    format,
1991                    blend,
1992                    write_mask: ColorWrites::ALL,
1993                })],
1994            }),
1995            layout: bind_group_layout,
1996            push_constant_ranges: vec![],
1997            primitive: PrimitiveState {
1998                front_face: FrontFace::Ccw,
1999                cull_mode: Some(Face::Back),
2000                unclipped_depth: false,
2001                polygon_mode: PolygonMode::Fill,
2002                conservative: false,
2003                topology: key.primitive_topology(),
2004                strip_index_format: None,
2005            },
2006            depth_stencil: Some(DepthStencilState {
2007                format: CORE_3D_DEPTH_FORMAT,
2008                depth_write_enabled,
2009                depth_compare: CompareFunction::GreaterEqual,
2010                stencil: StencilState {
2011                    front: StencilFaceState::IGNORE,
2012                    back: StencilFaceState::IGNORE,
2013                    read_mask: 0,
2014                    write_mask: 0,
2015                },
2016                bias: DepthBiasState {
2017                    constant: 0,
2018                    slope_scale: 0.0,
2019                    clamp: 0.0,
2020                },
2021            }),
2022            multisample: MultisampleState {
2023                count: key.msaa_samples(),
2024                mask: !0,
2025                alpha_to_coverage_enabled,
2026            },
2027            label: Some(label),
2028            zero_initialize_workgroup_memory: false,
2029        })
2030    }
2031}
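
// A rough sketch of how this specialization is typically driven from a queue
// system through `SpecializedMeshPipelines<MeshPipeline>`; `view_key` and
// `mesh_key_bits` are placeholders for the per-view and per-mesh parts of the
// key, and the real queueing logic for PBR materials lives in the material
// module:
//
//     let pipeline_id = specialized_pipelines.specialize(
//         &pipeline_cache,
//         &mesh_pipeline,
//         view_key | mesh_key_bits,
//         &mesh.layout,
//     )?;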
2032
2033/// Bind groups for meshes currently loaded.
2034#[derive(Resource, Default)]
2035pub struct MeshBindGroups {
2036    model_only: Option<BindGroup>,
2037    skinned: Option<MeshBindGroupPair>,
2038    morph_targets: HashMap<AssetId<Mesh>, MeshBindGroupPair>,
2039    lightmaps: HashMap<AssetId<Image>, BindGroup>,
2040}
2041
2042pub struct MeshBindGroupPair {
2043    motion_vectors: BindGroup,
2044    no_motion_vectors: BindGroup,
2045}
2046
2047impl MeshBindGroups {
2048    pub fn reset(&mut self) {
2049        self.model_only = None;
2050        self.skinned = None;
2051        self.morph_targets.clear();
2052        self.lightmaps.clear();
2053    }
2054    /// Get the `BindGroup` for the `RenderMesh` with the given `asset_id` and
2055    /// lightmap key `lightmap`.
2056    pub fn get(
2057        &self,
2058        asset_id: AssetId<Mesh>,
2059        lightmap: Option<AssetId<Image>>,
2060        is_skinned: bool,
2061        morph: bool,
2062        motion_vectors: bool,
2063    ) -> Option<&BindGroup> {
2064        match (is_skinned, morph, lightmap) {
2065            (_, true, _) => self
2066                .morph_targets
2067                .get(&asset_id)
2068                .map(|bind_group_pair| bind_group_pair.get(motion_vectors)),
2069            (true, false, _) => self
2070                .skinned
2071                .as_ref()
2072                .map(|bind_group_pair| bind_group_pair.get(motion_vectors)),
2073            (false, false, Some(lightmap)) => self.lightmaps.get(&lightmap),
2074            (false, false, None) => self.model_only.as_ref(),
2075        }
2076    }
2077}
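
// At draw time, `SetMeshBindGroup` below resolves a prepared bind group
// roughly like this (a sketch with error handling elided):
//
//     let bind_group = bind_groups
//         .get(mesh_asset_id, lightmap, is_skinned, is_morphed, motion_vectors)
//         .expect("set by `prepare_mesh_bind_group`");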
2078
2079impl MeshBindGroupPair {
2080    fn get(&self, motion_vectors: bool) -> &BindGroup {
2081        if motion_vectors {
2082            &self.motion_vectors
2083        } else {
2084            &self.no_motion_vectors
2085        }
2086    }
2087}
2088
2089#[allow(clippy::too_many_arguments)]
2090pub fn prepare_mesh_bind_group(
2091    meshes: Res<RenderAssets<RenderMesh>>,
2092    images: Res<RenderAssets<GpuImage>>,
2093    mut groups: ResMut<MeshBindGroups>,
2094    mesh_pipeline: Res<MeshPipeline>,
2095    render_device: Res<RenderDevice>,
2096    cpu_batched_instance_buffer: Option<
2097        Res<no_gpu_preprocessing::BatchedInstanceBuffer<MeshUniform>>,
2098    >,
2099    gpu_batched_instance_buffers: Option<
2100        Res<gpu_preprocessing::BatchedInstanceBuffers<MeshUniform, MeshInputUniform>>,
2101    >,
2102    skins_uniform: Res<SkinUniforms>,
2103    weights_uniform: Res<MorphUniforms>,
2104    render_lightmaps: Res<RenderLightmaps>,
2105) {
2106    groups.reset();
2107
2108    let layouts = &mesh_pipeline.mesh_layouts;
2109
2110    let model = if let Some(cpu_batched_instance_buffer) = cpu_batched_instance_buffer {
2111        cpu_batched_instance_buffer
2112            .into_inner()
2113            .instance_data_binding()
2114    } else if let Some(gpu_batched_instance_buffers) = gpu_batched_instance_buffers {
2115        gpu_batched_instance_buffers
2116            .into_inner()
2117            .instance_data_binding()
2118    } else {
2119        return;
2120    };
2121    let Some(model) = model else { return };
2122
2123    groups.model_only = Some(layouts.model_only(&render_device, &model));
2124
2125    // Create the skinned mesh bind group with the current and previous buffers
2126    // (the latter being for motion vector computation). If there's no previous
2127    // buffer, just use the current one as the shader will ignore it.
2128    let skin = skins_uniform.current_buffer.buffer();
2129    if let Some(skin) = skin {
2130        let prev_skin = skins_uniform.prev_buffer.buffer().unwrap_or(skin);
2131        groups.skinned = Some(MeshBindGroupPair {
2132            motion_vectors: layouts.skinned_motion(&render_device, &model, skin, prev_skin),
2133            no_motion_vectors: layouts.skinned(&render_device, &model, skin),
2134        });
2135    }
2136
2137    // Create the morphed bind groups just like we did for the skinned bind
2138    // group.
2139    if let Some(weights) = weights_uniform.current_buffer.buffer() {
2140        let prev_weights = weights_uniform.prev_buffer.buffer().unwrap_or(weights);
2141        for (id, gpu_mesh) in meshes.iter() {
2142            if let Some(targets) = gpu_mesh.morph_targets.as_ref() {
2143                let bind_group_pair = match skin.filter(|_| is_skinned(&gpu_mesh.layout)) {
2144                    Some(skin) => {
2145                        let prev_skin = skins_uniform.prev_buffer.buffer().unwrap_or(skin);
2146                        MeshBindGroupPair {
2147                            motion_vectors: layouts.morphed_skinned_motion(
2148                                &render_device,
2149                                &model,
2150                                skin,
2151                                weights,
2152                                targets,
2153                                prev_skin,
2154                                prev_weights,
2155                            ),
2156                            no_motion_vectors: layouts.morphed_skinned(
2157                                &render_device,
2158                                &model,
2159                                skin,
2160                                weights,
2161                                targets,
2162                            ),
2163                        }
2164                    }
2165                    None => MeshBindGroupPair {
2166                        motion_vectors: layouts.morphed_motion(
2167                            &render_device,
2168                            &model,
2169                            weights,
2170                            targets,
2171                            prev_weights,
2172                        ),
2173                        no_motion_vectors: layouts.morphed(
2174                            &render_device,
2175                            &model,
2176                            weights,
2177                            targets,
2178                        ),
2179                    },
2180                };
2181                groups.morph_targets.insert(id, bind_group_pair);
2182            }
2183        }
2184    }
2185
2186    // Create lightmap bindgroups.
2187    for &image_id in &render_lightmaps.all_lightmap_images {
2188        if let (Entry::Vacant(entry), Some(image)) =
2189            (groups.lightmaps.entry(image_id), images.get(image_id))
2190        {
2191            entry.insert(layouts.lightmapped(&render_device, &model, image));
2192        }
2193    }
2194}
2195
2196pub struct SetMeshViewBindGroup<const I: usize>;
2197impl<P: PhaseItem, const I: usize> RenderCommand<P> for SetMeshViewBindGroup<I> {
2198    type Param = ();
2199    type ViewQuery = (
2200        Read<ViewUniformOffset>,
2201        Read<ViewLightsUniformOffset>,
2202        Read<ViewFogUniformOffset>,
2203        Read<ViewLightProbesUniformOffset>,
2204        Read<ViewScreenSpaceReflectionsUniformOffset>,
2205        Read<ViewEnvironmentMapUniformOffset>,
2206        Read<MeshViewBindGroup>,
2207        Option<Read<OrderIndependentTransparencySettingsOffset>>,
2208    );
2209    type ItemQuery = ();
2210
2211    #[inline]
2212    fn render<'w>(
2213        _item: &P,
2214        (
2215            view_uniform,
2216            view_lights,
2217            view_fog,
2218            view_light_probes,
2219            view_ssr,
2220            view_environment_map,
2221            mesh_view_bind_group,
2222            maybe_oit_layers_count_offset,
2223        ): ROQueryItem<'w, Self::ViewQuery>,
2224        _entity: Option<()>,
2225        _: SystemParamItem<'w, '_, Self::Param>,
2226        pass: &mut TrackedRenderPass<'w>,
2227    ) -> RenderCommandResult {
2228        let mut offsets: SmallVec<[u32; 8]> = smallvec![
2229            view_uniform.offset,
2230            view_lights.offset,
2231            view_fog.offset,
2232            **view_light_probes,
2233            **view_ssr,
2234            **view_environment_map,
2235        ];
2236        if let Some(layers_count_offset) = maybe_oit_layers_count_offset {
2237            offsets.push(layers_count_offset.offset);
2238        }
2239        pass.set_bind_group(I, &mesh_view_bind_group.value, &offsets);
2240
2241        RenderCommandResult::Success
2242    }
2243}
2244
2245pub struct SetMeshBindGroup<const I: usize>;
2246impl<P: PhaseItem, const I: usize> RenderCommand<P> for SetMeshBindGroup<I> {
2247    type Param = (
2248        SRes<MeshBindGroups>,
2249        SRes<RenderMeshInstances>,
2250        SRes<SkinIndices>,
2251        SRes<MorphIndices>,
2252        SRes<RenderLightmaps>,
2253    );
2254    type ViewQuery = Has<MotionVectorPrepass>;
2255    type ItemQuery = ();
2256
2257    #[inline]
2258    fn render<'w>(
2259        item: &P,
2260        has_motion_vector_prepass: bool,
2261        _item_query: Option<()>,
2262        (bind_groups, mesh_instances, skin_indices, morph_indices, lightmaps): SystemParamItem<
2263            'w,
2264            '_,
2265            Self::Param,
2266        >,
2267        pass: &mut TrackedRenderPass<'w>,
2268    ) -> RenderCommandResult {
2269        let bind_groups = bind_groups.into_inner();
2270        let mesh_instances = mesh_instances.into_inner();
2271        let skin_indices = skin_indices.into_inner();
2272        let morph_indices = morph_indices.into_inner();
2273
2274        let entity = &item.main_entity();
2275
2276        let Some(mesh_asset_id) = mesh_instances.mesh_asset_id(*entity) else {
2277            return RenderCommandResult::Success;
2278        };
2279        let current_skin_index = skin_indices.current.get(entity);
2280        let prev_skin_index = skin_indices.prev.get(entity);
2281        let current_morph_index = morph_indices.current.get(entity);
2282        let prev_morph_index = morph_indices.prev.get(entity);
2283
2284        let is_skinned = current_skin_index.is_some();
2285        let is_morphed = current_morph_index.is_some();
2286
2287        let lightmap = lightmaps
2288            .render_lightmaps
2289            .get(entity)
2290            .map(|render_lightmap| render_lightmap.image);
2291
2292        let Some(bind_group) = bind_groups.get(
2293            mesh_asset_id,
2294            lightmap,
2295            is_skinned,
2296            is_morphed,
2297            has_motion_vector_prepass,
2298        ) else {
2299            return RenderCommandResult::Failure(
2300                "The MeshBindGroups resource wasn't set in the render phase. \
2301                It should be set by the prepare_mesh_bind_group system.\n\
2302                This is a bevy bug! Please open an issue.",
2303            );
2304        };
2305
2306        let mut dynamic_offsets: [u32; 5] = Default::default(); // item offset + current/prev skin + current/prev morph
2307        let mut offset_count = 0;
2308        if let Some(dynamic_offset) = item.extra_index().as_dynamic_offset() {
2309            dynamic_offsets[offset_count] = dynamic_offset.get();
2310            offset_count += 1;
2311        }
2312        if let Some(current_skin_index) = current_skin_index {
2313            dynamic_offsets[offset_count] = current_skin_index.index;
2314            offset_count += 1;
2315        }
2316        if let Some(current_morph_index) = current_morph_index {
2317            dynamic_offsets[offset_count] = current_morph_index.index;
2318            offset_count += 1;
2319        }
2320
2321        // Attach motion vectors if needed.
2322        if has_motion_vector_prepass {
2323            // Attach the previous skin index for motion vector computation. If
2324            // there isn't one, just use zero as the shader will ignore it.
2325            if current_skin_index.is_some() {
2326                match prev_skin_index {
2327                    Some(prev_skin_index) => dynamic_offsets[offset_count] = prev_skin_index.index,
2328                    None => dynamic_offsets[offset_count] = 0,
2329                }
2330                offset_count += 1;
2331            }
2332
2333            // Attach the previous morph index for motion vector computation. If
2334            // there isn't one, just use zero as the shader will ignore it.
2335            if current_morph_index.is_some() {
2336                match prev_morph_index {
2337                    Some(prev_morph_index) => {
2338                        dynamic_offsets[offset_count] = prev_morph_index.index;
2339                    }
2340                    None => dynamic_offsets[offset_count] = 0,
2341                }
2342                offset_count += 1;
2343            }
2344        }
2345
2346        pass.set_bind_group(I, bind_group, &dynamic_offsets[0..offset_count]);
2347
2348        RenderCommandResult::Success
2349    }
2350}
2351
2352pub struct DrawMesh;
2353impl<P: PhaseItem> RenderCommand<P> for DrawMesh {
2354    type Param = (
2355        SRes<RenderAssets<RenderMesh>>,
2356        SRes<RenderMeshInstances>,
2357        SRes<IndirectParametersBuffer>,
2358        SRes<PipelineCache>,
2359        SRes<MeshAllocator>,
2360        Option<SRes<PreprocessPipelines>>,
2361    );
2362    type ViewQuery = Has<PreprocessBindGroup>;
2363    type ItemQuery = ();
2364    #[inline]
2365    fn render<'w>(
2366        item: &P,
2367        has_preprocess_bind_group: ROQueryItem<Self::ViewQuery>,
2368        _item_query: Option<()>,
2369        (
2370            meshes,
2371            mesh_instances,
2372            indirect_parameters_buffer,
2373            pipeline_cache,
2374            mesh_allocator,
2375            preprocess_pipelines,
2376        ): SystemParamItem<'w, '_, Self::Param>,
2377        pass: &mut TrackedRenderPass<'w>,
2378    ) -> RenderCommandResult {
2379        // If we're using GPU preprocessing, then we're dependent on that
2380        // compute shader having been run, which of course can only happen if
2381        // it's compiled. Otherwise, our mesh instance data won't be present.
2382        if let Some(preprocess_pipelines) = preprocess_pipelines {
2383            if !has_preprocess_bind_group
2384                || !preprocess_pipelines.pipelines_are_loaded(&pipeline_cache)
2385            {
2386                return RenderCommandResult::Skip;
2387            }
2388        }
2389
2390        let meshes = meshes.into_inner();
2391        let mesh_instances = mesh_instances.into_inner();
2392        let indirect_parameters_buffer = indirect_parameters_buffer.into_inner();
2393        let mesh_allocator = mesh_allocator.into_inner();
2394
2395        let Some(mesh_asset_id) = mesh_instances.mesh_asset_id(item.main_entity()) else {
2396            return RenderCommandResult::Skip;
2397        };
2398        let Some(gpu_mesh) = meshes.get(mesh_asset_id) else {
2399            return RenderCommandResult::Skip;
2400        };
2401        let Some(vertex_buffer_slice) = mesh_allocator.mesh_vertex_slice(&mesh_asset_id) else {
2402            return RenderCommandResult::Skip;
2403        };
2404
2405        // Calculate the indirect offset, and look up the buffer.
2406        let indirect_parameters = match item.extra_index().as_indirect_parameters_index() {
2407            None => None,
2408            Some(index) => match indirect_parameters_buffer.buffer() {
2409                None => {
2410                    warn!("Not rendering mesh because indirect parameters buffer wasn't present");
2411                    return RenderCommandResult::Skip;
2412                }
2413                Some(buffer) => Some((
2414                    index as u64 * size_of::<IndirectParameters>() as u64,
2415                    buffer,
2416                )),
2417            },
2418        };
2419
2420        pass.set_vertex_buffer(0, vertex_buffer_slice.buffer.slice(..));
2421
2422        let batch_range = item.batch_range();
2423
2424        // Draw either directly or indirectly, as appropriate.
2425        match &gpu_mesh.buffer_info {
2426            RenderMeshBufferInfo::Indexed {
2427                index_format,
2428                count,
2429            } => {
2430                let Some(index_buffer_slice) = mesh_allocator.mesh_index_slice(&mesh_asset_id)
2431                else {
2432                    return RenderCommandResult::Skip;
2433                };
2434
2435                pass.set_index_buffer(index_buffer_slice.buffer.slice(..), 0, *index_format);
2436
2437                match indirect_parameters {
2438                    None => {
2439                        pass.draw_indexed(
2440                            index_buffer_slice.range.start
2441                                ..(index_buffer_slice.range.start + *count),
2442                            vertex_buffer_slice.range.start as i32,
2443                            batch_range.clone(),
2444                        );
2445                    }
2446                    Some((indirect_parameters_offset, indirect_parameters_buffer)) => pass
2447                        .draw_indexed_indirect(
2448                            indirect_parameters_buffer,
2449                            indirect_parameters_offset,
2450                        ),
2451                }
2452            }
2453            RenderMeshBufferInfo::NonIndexed => match indirect_parameters {
2454                None => {
2455                    pass.draw(vertex_buffer_slice.range, batch_range.clone());
2456                }
2457                Some((indirect_parameters_offset, indirect_parameters_buffer)) => {
2458                    pass.draw_indirect(indirect_parameters_buffer, indirect_parameters_offset);
2459                }
2460            },
2461        }
2462        RenderCommandResult::Success
2463    }
2464}
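
// A sketch of how these render commands are typically composed into a complete
// draw function (`DrawMesh3d` is a hypothetical alias here; the concrete tuple
// used for PBR materials also sets the material bind group and lives in the
// material module):
//
//     type DrawMesh3d = (
//         SetItemPipeline,
//         SetMeshViewBindGroup<0>,
//         SetMeshBindGroup<1>,
//         DrawMesh,
//     );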
2465
2466#[cfg(test)]
2467mod tests {
2468    use super::MeshPipelineKey;
2469    #[test]
2470    fn mesh_key_msaa_samples() {
2471        for i in [1, 2, 4, 8, 16, 32, 64, 128] {
2472            assert_eq!(MeshPipelineKey::from_msaa_samples(i).msaa_samples(), i);
2473        }
2474    }
2475}