bevy_render/camera/
camera.rs

#![expect(
    clippy::module_inception,
    reason = "The parent module contains all things viewport-related, while this module handles cameras as a component. However, a rename/refactor which should clear up this lint is being discussed; see #17196."
)]
use super::{ClearColorConfig, Projection};
use crate::{
    batching::gpu_preprocessing::{GpuPreprocessingMode, GpuPreprocessingSupport},
    camera::{CameraProjection, ManualTextureViewHandle, ManualTextureViews},
    primitives::Frustum,
    render_asset::RenderAssets,
    render_graph::{InternedRenderSubGraph, RenderSubGraph},
    render_resource::TextureView,
    sync_world::{RenderEntity, SyncToRenderWorld},
    texture::GpuImage,
    view::{
        ColorGrading, ExtractedView, ExtractedWindows, Msaa, NoIndirectDrawing, RenderLayers,
        RenderVisibleEntities, RetainedViewEntity, ViewUniformOffset, Visibility, VisibleEntities,
    },
    Extract,
};
use bevy_asset::{AssetEvent, AssetId, Assets, Handle};
use bevy_derive::{Deref, DerefMut};
use bevy_ecs::{
    change_detection::DetectChanges,
    component::{Component, HookContext},
    entity::{ContainsEntity, Entity},
    event::EventReader,
    prelude::With,
    query::Has,
    reflect::ReflectComponent,
    resource::Resource,
    system::{Commands, Query, Res, ResMut},
    world::DeferredWorld,
};
use bevy_image::Image;
use bevy_math::{ops, vec2, Dir3, FloatOrd, Mat4, Ray3d, Rect, URect, UVec2, UVec4, Vec2, Vec3};
use bevy_platform::collections::{HashMap, HashSet};
use bevy_reflect::prelude::*;
use bevy_render_macros::ExtractComponent;
use bevy_transform::components::{GlobalTransform, Transform};
use bevy_window::{
    NormalizedWindowRef, PrimaryWindow, Window, WindowCreated, WindowRef, WindowResized,
    WindowScaleFactorChanged,
};
use core::ops::Range;
use derive_more::derive::From;
use thiserror::Error;
use tracing::warn;
use wgpu::{BlendState, TextureFormat, TextureUsages};

/// Render viewport configuration for the [`Camera`] component.
///
/// The viewport defines the area on the render target to which the camera renders its image.
/// You can overlay multiple cameras in a single window using viewports to create effects like
/// split screen, minimaps, and character viewers.
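///
/// A minimal sketch, assuming a 1920x1080 render target, of a viewport covering
/// its lower half:
/// ```
/// use bevy_math::UVec2;
/// use bevy_render::camera::Viewport;
///
/// let viewport = Viewport {
///     physical_position: UVec2::new(0, 540),
///     physical_size: UVec2::new(1920, 540),
///     ..Default::default()
/// };
/// ```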
#[derive(Reflect, Debug, Clone)]
#[reflect(Default, Clone)]
pub struct Viewport {
    /// The physical position to render this viewport to within the [`RenderTarget`] of this [`Camera`].
    /// (0,0) corresponds to the top-left corner.
    pub physical_position: UVec2,
    /// The physical size of the viewport rectangle to render to within the [`RenderTarget`] of this [`Camera`].
    /// The origin of the rectangle is in the top-left corner.
    pub physical_size: UVec2,
    /// The minimum and maximum depth to render (on a scale from 0.0 to 1.0).
    pub depth: Range<f32>,
}

impl Default for Viewport {
    fn default() -> Self {
        Self {
            physical_position: Default::default(),
            physical_size: UVec2::new(1, 1),
            depth: 0.0..1.0,
        }
    }
}

impl Viewport {
    /// Cut the viewport rectangle so that it lies inside a rectangle of the
    /// given size.
    ///
    /// If either of the viewport's position coordinates lies outside the given
    /// dimensions, it will be moved just inside first. If either of the given
    /// dimensions is zero, the position and size of the viewport rectangle will
    /// both be set to zero in that dimension.
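    ///
    /// A quick sketch of the clamping behavior:
    /// ```
    /// use bevy_math::UVec2;
    /// use bevy_render::camera::Viewport;
    ///
    /// let mut viewport = Viewport {
    ///     physical_position: UVec2::new(100, 100),
    ///     physical_size: UVec2::new(200, 200),
    ///     ..Default::default()
    /// };
    /// // Only 50 pixels fit between the position (100) and the new bound (150).
    /// viewport.clamp_to_size(UVec2::new(150, 150));
    /// assert_eq!(viewport.physical_size, UVec2::new(50, 50));
    /// ```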
    pub fn clamp_to_size(&mut self, size: UVec2) {
        // If the origin of the viewport rect is outside, then adjust so that
        // it's just barely inside. Then, cut off the part that is outside.
        if self.physical_size.x + self.physical_position.x > size.x {
            if self.physical_position.x < size.x {
                self.physical_size.x = size.x - self.physical_position.x;
            } else if size.x > 0 {
                self.physical_position.x = size.x - 1;
                self.physical_size.x = 1;
            } else {
                self.physical_position.x = 0;
                self.physical_size.x = 0;
            }
        }
        if self.physical_size.y + self.physical_position.y > size.y {
            if self.physical_position.y < size.y {
                self.physical_size.y = size.y - self.physical_position.y;
            } else if size.y > 0 {
                self.physical_position.y = size.y - 1;
                self.physical_size.y = 1;
            } else {
                self.physical_position.y = 0;
                self.physical_size.y = 0;
            }
        }
    }
}

/// Settings to define a camera sub view.
///
/// When [`Camera::sub_camera_view`] is `Some`, only the sub-section of the
/// image defined by `size` and `offset` (relative to the `full_size` of the
/// whole image) is projected to the camera's viewport.
///
/// Take the example of the following multi-monitor setup:
/// ```css
/// ┌───┬───┐
/// │ A │ B │
/// ├───┼───┤
/// │ C │ D │
/// └───┴───┘
/// ```
/// If each monitor is 1920x1080, the whole image will have a resolution of
/// 3840x2160. For each monitor we can use a single camera with a viewport of
/// the same size as the monitor it corresponds to. To ensure that the image is
/// cohesive, we can use a different sub view on each camera:
/// - Camera A: `full_size` = 3840x2160, `size` = 1920x1080, `offset` = 0,0
/// - Camera B: `full_size` = 3840x2160, `size` = 1920x1080, `offset` = 1920,0
/// - Camera C: `full_size` = 3840x2160, `size` = 1920x1080, `offset` = 0,1080
/// - Camera D: `full_size` = 3840x2160, `size` = 1920x1080, `offset` =
///   1920,1080
///
/// However, since only the ratio between the values matters, they could all be
/// divided by 120 and still produce the same image. Camera D would, for
/// example, have the following values:
/// `full_size` = 32x18, `size` = 16x9, `offset` = 16,9
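///
/// A sketch of camera D using those reduced values:
/// ```
/// use bevy_math::{UVec2, Vec2};
/// use bevy_render::camera::SubCameraView;
///
/// let sub_view = SubCameraView {
///     full_size: UVec2::new(32, 18),
///     size: UVec2::new(16, 9),
///     offset: Vec2::new(16., 9.),
/// };
/// ```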
#[derive(Debug, Clone, Copy, Reflect, PartialEq)]
#[reflect(Clone, PartialEq, Default)]
pub struct SubCameraView {
    /// Size of the entire camera view
    pub full_size: UVec2,
    /// Offset of the sub camera
    pub offset: Vec2,
    /// Size of the sub camera
    pub size: UVec2,
}

impl Default for SubCameraView {
    fn default() -> Self {
        Self {
            full_size: UVec2::new(1, 1),
            offset: Vec2::new(0., 0.),
            size: UVec2::new(1, 1),
        }
    }
}

/// Information about the current [`RenderTarget`].
#[derive(Default, Debug, Clone)]
pub struct RenderTargetInfo {
    /// The physical size of this render target (in physical pixels, ignoring scale factor).
    pub physical_size: UVec2,
    /// The scale factor of this render target.
    ///
    /// When rendering to a window, this is typically greater than or equal to 1.0,
    /// representing the ratio between the size of the window in physical pixels and its logical size.
    pub scale_factor: f32,
}

/// Holds internally computed [`Camera`] values.
#[derive(Default, Debug, Clone)]
pub struct ComputedCameraValues {
    clip_from_view: Mat4,
    target_info: Option<RenderTargetInfo>,
    // size of the `Viewport`
    old_viewport_size: Option<UVec2>,
    old_sub_camera_view: Option<SubCameraView>,
}

/// How much energy a `Camera3d` absorbs from incoming light.
///
/// <https://en.wikipedia.org/wiki/Exposure_(photography)>
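///
/// A small sketch comparing two of the presets below:
/// ```
/// use bevy_render::camera::Exposure;
///
/// // A lower EV100 means the camera absorbs more energy from incoming light,
/// // so the indoor preset yields a larger exposure than the sunlight preset.
/// assert!(Exposure::INDOOR.exposure() > Exposure::SUNLIGHT.exposure());
/// ```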
#[derive(Component, Clone, Copy, Reflect)]
#[reflect(opaque)]
#[reflect(Component, Default, Clone)]
pub struct Exposure {
    /// <https://en.wikipedia.org/wiki/Exposure_value#Tabulated_exposure_values>
    pub ev100: f32,
}

impl Exposure {
    pub const SUNLIGHT: Self = Self {
        ev100: Self::EV100_SUNLIGHT,
    };
    pub const OVERCAST: Self = Self {
        ev100: Self::EV100_OVERCAST,
    };
    pub const INDOOR: Self = Self {
        ev100: Self::EV100_INDOOR,
    };
    /// This value was calibrated to match Blender's implicit/default exposure as closely as possible.
    /// It also happens to be a reasonable default.
    ///
    /// See <https://github.com/bevyengine/bevy/issues/11577> for details.
    pub const BLENDER: Self = Self {
        ev100: Self::EV100_BLENDER,
    };

    pub const EV100_SUNLIGHT: f32 = 15.0;
    pub const EV100_OVERCAST: f32 = 12.0;
    pub const EV100_INDOOR: f32 = 7.0;

    /// This value was calibrated to match Blender's implicit/default exposure as closely as possible.
    /// It also happens to be a reasonable default.
    ///
    /// See <https://github.com/bevyengine/bevy/issues/11577> for details.
    pub const EV100_BLENDER: f32 = 9.7;

    pub fn from_physical_camera(physical_camera_parameters: PhysicalCameraParameters) -> Self {
        Self {
            ev100: physical_camera_parameters.ev100(),
        }
    }

    /// Converts EV100 values to exposure values.
    /// <https://google.github.io/filament/Filament.md.html#imagingpipeline/physicallybasedcamera/exposure>
    #[inline]
    pub fn exposure(&self) -> f32 {
        ops::exp2(-self.ev100) / 1.2
    }
}

impl Default for Exposure {
    fn default() -> Self {
        Self::BLENDER
    }
}

/// Parameters based on physical camera characteristics for calculating EV100
/// values for use with [`Exposure`]. This is also used for depth of field.
#[derive(Clone, Copy)]
pub struct PhysicalCameraParameters {
    /// <https://en.wikipedia.org/wiki/F-number>
    pub aperture_f_stops: f32,
    /// <https://en.wikipedia.org/wiki/Shutter_speed>
    pub shutter_speed_s: f32,
    /// <https://en.wikipedia.org/wiki/Film_speed>
    pub sensitivity_iso: f32,
    /// The height of the [image sensor format] in meters.
    ///
    /// Focal length is derived from the FOV and this value. The default is
    /// 18.66mm, matching the [Super 35] format, which is popular in cinema.
    ///
    /// [image sensor format]: https://en.wikipedia.org/wiki/Image_sensor_format
    ///
    /// [Super 35]: https://en.wikipedia.org/wiki/Super_35
    pub sensor_height: f32,
}

impl PhysicalCameraParameters {
    /// Calculate the [EV100](https://en.wikipedia.org/wiki/Exposure_value).
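    ///
    /// A quick sanity check using the default parameters
    /// (f/1.0, 1/125 s, ISO 100):
    /// ```
    /// use bevy_render::camera::PhysicalCameraParameters;
    ///
    /// // log2(1.0 * 1.0 * 100.0 / ((1.0 / 125.0) * 100.0)) = log2(125) ≈ 6.97
    /// let ev100 = PhysicalCameraParameters::default().ev100();
    /// assert!((ev100 - 6.97).abs() < 0.01);
    /// ```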
    pub fn ev100(&self) -> f32 {
        ops::log2(
            self.aperture_f_stops * self.aperture_f_stops * 100.0
                / (self.shutter_speed_s * self.sensitivity_iso),
        )
    }
}

impl Default for PhysicalCameraParameters {
    fn default() -> Self {
        Self {
            aperture_f_stops: 1.0,
            shutter_speed_s: 1.0 / 125.0,
            sensitivity_iso: 100.0,
            sensor_height: 0.01866,
        }
    }
}

/// Error returned when a conversion between world-space and viewport-space coordinates fails.
///
/// See [`world_to_viewport`][Camera::world_to_viewport] and [`viewport_to_world`][Camera::viewport_to_world].
#[derive(Debug, Eq, PartialEq, Copy, Clone, Error)]
pub enum ViewportConversionError {
    /// The pre-computed size of the viewport was not available.
    ///
    /// This may be because the `Camera` was just created and [`camera_system`] has not been executed
    /// yet, or because the [`RenderTarget`] is misconfigured in one of the following ways:
    ///   - it references the [`PrimaryWindow`](RenderTarget::Window) when there is none,
    ///   - it references a [`Window`](RenderTarget::Window) entity that doesn't exist or doesn't actually have a `Window` component,
    ///   - it references an [`Image`](RenderTarget::Image) that doesn't exist (invalid handle),
    ///   - it references a [`TextureView`](RenderTarget::TextureView) that doesn't exist (invalid handle).
    #[error("pre-computed size of viewport not available")]
    NoViewportSize,
    /// The computed coordinate was beyond the `Camera`'s near plane.
    ///
    /// Only applicable when converting from world-space to viewport-space.
    #[error("computed coordinate beyond `Camera`'s near plane")]
    PastNearPlane,
    /// The computed coordinate was beyond the `Camera`'s far plane.
    ///
    /// Only applicable when converting from world-space to viewport-space.
    #[error("computed coordinate beyond `Camera`'s far plane")]
    PastFarPlane,
    /// The Normalized Device Coordinates could not be computed because the `camera_transform`, the
    /// `world_position`, or the projection matrix defined by [`CameraProjection`] contained `NAN`
    /// (see [`world_to_ndc`][Camera::world_to_ndc] and [`ndc_to_world`][Camera::ndc_to_world]).
    #[error("found NaN while computing NDC")]
    InvalidData,
}

/// The defining [`Component`] for camera entities,
/// storing information about how and what to render through this camera.
///
/// The [`Camera`] component is added to an entity to define the properties of the viewpoint from
/// which rendering occurs. It defines the position of the view to render, the projection method
/// to transform the 3D objects into a 2D image, as well as the render target into which that image
/// is produced.
///
/// Note that a [`Camera`] needs a [`CameraRenderGraph`] to render anything.
/// This is typically provided by adding a [`Camera2d`] or [`Camera3d`] component,
/// but custom render graphs can also be defined. Inserting a [`Camera`] with no render
/// graph will emit an error at runtime.
///
/// [`Camera2d`]: https://docs.rs/bevy/latest/bevy/core_pipeline/core_2d/struct.Camera2d.html
/// [`Camera3d`]: https://docs.rs/bevy/latest/bevy/core_pipeline/core_3d/struct.Camera3d.html
#[derive(Component, Debug, Reflect, Clone)]
#[reflect(Component, Default, Debug, Clone)]
#[component(on_add = warn_on_no_render_graph)]
#[require(
    Frustum,
    CameraMainTextureUsages,
    VisibleEntities,
    Transform,
    Visibility,
    Msaa,
    SyncToRenderWorld
)]
pub struct Camera {
    /// If set, this camera will render to the given [`Viewport`] rectangle within the configured [`RenderTarget`].
    pub viewport: Option<Viewport>,
    /// Cameras with a higher order are rendered later, and thus on top of lower order cameras.
    pub order: isize,
    /// If this is set to `true`, this camera will be rendered to its specified [`RenderTarget`]. If `false`, this
    /// camera will not be rendered.
    pub is_active: bool,
    /// Computed values for this camera, such as the projection matrix and the render target size.
    #[reflect(ignore, clone)]
    pub computed: ComputedCameraValues,
    /// The "target" that this camera will render to.
    pub target: RenderTarget,
    /// If this is set to `true`, the camera will use an intermediate "high dynamic range" render texture.
    /// This allows rendering with a wider range of lighting values.
    pub hdr: bool,
    // todo: reflect this when #6042 lands
    /// The [`CameraOutputMode`] for this camera.
    #[reflect(ignore, clone)]
    pub output_mode: CameraOutputMode,
    /// If this is set to `true`, a previous camera exists that shares this camera's render target, and this camera has
    /// MSAA enabled, then the previous camera's outputs will be written to the intermediate multi-sampled render target
    /// textures for this camera. This enables cameras with MSAA enabled to "write their results on top" of previous
    /// camera results, and include them as a part of their render results. This is enabled by default to ensure that
    /// cameras with MSAA enabled layer their results the same way as cameras without MSAA.
    pub msaa_writeback: bool,
    /// The clear color operation to perform on the render target.
    pub clear_color: ClearColorConfig,
    /// If set, this camera will be a sub camera of a large view, defined by a [`SubCameraView`].
    pub sub_camera_view: Option<SubCameraView>,
}

fn warn_on_no_render_graph(world: DeferredWorld, HookContext { entity, caller, .. }: HookContext) {
    if !world.entity(entity).contains::<CameraRenderGraph>() {
        warn!("{}Entity {entity} has a `Camera` component, but it doesn't have a render graph configured. Consider adding a `Camera2d` or `Camera3d` component, or manually adding a `CameraRenderGraph` component if you need a custom render graph.", caller.map(|location|format!("{location}: ")).unwrap_or_default());
    }
}

impl Default for Camera {
    fn default() -> Self {
        Self {
            is_active: true,
            order: 0,
            viewport: None,
            computed: Default::default(),
            target: Default::default(),
            output_mode: Default::default(),
            hdr: false,
            msaa_writeback: true,
            clear_color: Default::default(),
            sub_camera_view: None,
        }
    }
}

impl Camera {
    /// Converts a physical size in this `Camera` to a logical size.
    #[inline]
    pub fn to_logical(&self, physical_size: UVec2) -> Option<Vec2> {
        let scale = self.computed.target_info.as_ref()?.scale_factor;
        Some(physical_size.as_vec2() / scale)
    }

    /// The rendered physical bounds [`URect`] of the camera. If the `viewport` field is
    /// set to [`Some`], this will be the rect of that custom viewport. Otherwise it will default to
    /// the full physical rect of the current [`RenderTarget`].
    #[inline]
    pub fn physical_viewport_rect(&self) -> Option<URect> {
        let min = self
            .viewport
            .as_ref()
            .map(|v| v.physical_position)
            .unwrap_or(UVec2::ZERO);
        let max = min + self.physical_viewport_size()?;
        Some(URect { min, max })
    }

    /// The rendered logical bounds [`Rect`] of the camera. If the `viewport` field is set to
    /// [`Some`], this will be the rect of that custom viewport. Otherwise it will default to the
    /// full logical rect of the current [`RenderTarget`].
    #[inline]
    pub fn logical_viewport_rect(&self) -> Option<Rect> {
        let URect { min, max } = self.physical_viewport_rect()?;
        Some(Rect {
            min: self.to_logical(min)?,
            max: self.to_logical(max)?,
        })
    }

    /// The logical size of this camera's viewport. If the `viewport` field is set to [`Some`], this
    /// will be the size of that custom viewport. Otherwise it will default to the full logical size
    /// of the current [`RenderTarget`].
    /// For logic that requires the full logical size of the
    /// [`RenderTarget`], prefer [`Camera::logical_target_size`].
    ///
    /// Returns `None` if either:
    /// - the function is called just after the `Camera` is created, before `camera_system` is executed,
    /// - the [`RenderTarget`] isn't correctly set:
    ///   - it references the [`PrimaryWindow`](RenderTarget::Window) when there is none,
    ///   - it references a [`Window`](RenderTarget::Window) entity that doesn't exist or doesn't actually have a `Window` component,
    ///   - it references an [`Image`](RenderTarget::Image) that doesn't exist (invalid handle),
    ///   - it references a [`TextureView`](RenderTarget::TextureView) that doesn't exist (invalid handle).
    #[inline]
    pub fn logical_viewport_size(&self) -> Option<Vec2> {
        self.viewport
            .as_ref()
            .and_then(|v| self.to_logical(v.physical_size))
            .or_else(|| self.logical_target_size())
    }

    /// The physical size of this camera's viewport (in physical pixels).
    /// If the `viewport` field is set to [`Some`], this
    /// will be the size of that custom viewport. Otherwise it will default to the full physical size of
    /// the current [`RenderTarget`].
    /// For logic that requires the full physical size of the [`RenderTarget`], prefer [`Camera::physical_target_size`].
    #[inline]
    pub fn physical_viewport_size(&self) -> Option<UVec2> {
        self.viewport
            .as_ref()
            .map(|v| v.physical_size)
            .or_else(|| self.physical_target_size())
    }

    /// The full logical size of this camera's [`RenderTarget`], ignoring custom `viewport` configuration.
    /// Note that if the `viewport` field is [`Some`], this will not represent the size of the rendered area.
    /// For logic that requires the size of the actually rendered area, prefer [`Camera::logical_viewport_size`].
    #[inline]
    pub fn logical_target_size(&self) -> Option<Vec2> {
        self.computed
            .target_info
            .as_ref()
            .and_then(|t| self.to_logical(t.physical_size))
    }

    /// The full physical size of this camera's [`RenderTarget`] (in physical pixels),
    /// ignoring custom `viewport` configuration.
    /// Note that if the `viewport` field is [`Some`], this will not represent the size of the rendered area.
    /// For logic that requires the size of the actually rendered area, prefer [`Camera::physical_viewport_size`].
    #[inline]
    pub fn physical_target_size(&self) -> Option<UVec2> {
        self.computed.target_info.as_ref().map(|t| t.physical_size)
    }

    /// The scale factor of this camera's [`RenderTarget`].
    #[inline]
    pub fn target_scaling_factor(&self) -> Option<f32> {
        self.computed
            .target_info
            .as_ref()
            .map(|t: &RenderTargetInfo| t.scale_factor)
    }

    /// The projection matrix computed using this camera's [`CameraProjection`].
    #[inline]
    pub fn clip_from_view(&self) -> Mat4 {
        self.computed.clip_from_view
    }

    /// Given a position in world space, use the camera to compute the viewport-space coordinates.
    ///
    /// To get the coordinates in Normalized Device Coordinates, you should use
    /// [`world_to_ndc`](Self::world_to_ndc).
    ///
    /// # Panics
    ///
    /// Will panic if `glam_assert` is enabled and the `camera_transform` contains `NAN`
    /// (see [`world_to_ndc`][Self::world_to_ndc]).
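    ///
    /// A sketch of typical usage from a system (the system name is illustrative):
    /// ```
    /// use bevy_ecs::prelude::*;
    /// use bevy_math::Vec3;
    /// use bevy_render::camera::Camera;
    /// use bevy_transform::components::GlobalTransform;
    ///
    /// fn print_world_origin(cameras: Query<(&Camera, &GlobalTransform)>) {
    ///     for (camera, transform) in &cameras {
    ///         if let Ok(position) = camera.world_to_viewport(transform, Vec3::ZERO) {
    ///             println!("world origin is at {position} in viewport space");
    ///         }
    ///     }
    /// }
    /// ```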
    #[doc(alias = "world_to_screen")]
    pub fn world_to_viewport(
        &self,
        camera_transform: &GlobalTransform,
        world_position: Vec3,
    ) -> Result<Vec2, ViewportConversionError> {
        let target_rect = self
            .logical_viewport_rect()
            .ok_or(ViewportConversionError::NoViewportSize)?;
        let mut ndc_space_coords = self
            .world_to_ndc(camera_transform, world_position)
            .ok_or(ViewportConversionError::InvalidData)?;
        // NDC z-values outside of 0 < z < 1 are outside the (implicit) camera frustum and are thus not in viewport-space
        if ndc_space_coords.z < 0.0 {
            return Err(ViewportConversionError::PastNearPlane);
        }
        if ndc_space_coords.z > 1.0 {
            return Err(ViewportConversionError::PastFarPlane);
        }

        // Flip the Y co-ordinate origin from the bottom to the top.
        ndc_space_coords.y = -ndc_space_coords.y;

        // Once in NDC space, we can discard the z element and map x/y to the viewport rect
        let viewport_position =
            (ndc_space_coords.truncate() + Vec2::ONE) / 2.0 * target_rect.size() + target_rect.min;
        Ok(viewport_position)
    }

    /// Given a position in world space, use the camera to compute the viewport-space coordinates and depth.
    ///
    /// To get the coordinates in Normalized Device Coordinates, you should use
    /// [`world_to_ndc`](Self::world_to_ndc).
    ///
    /// # Panics
    ///
    /// Will panic if `glam_assert` is enabled and the `camera_transform` contains `NAN`
    /// (see [`world_to_ndc`][Self::world_to_ndc]).
    #[doc(alias = "world_to_screen_with_depth")]
    pub fn world_to_viewport_with_depth(
        &self,
        camera_transform: &GlobalTransform,
        world_position: Vec3,
    ) -> Result<Vec3, ViewportConversionError> {
        let target_rect = self
            .logical_viewport_rect()
            .ok_or(ViewportConversionError::NoViewportSize)?;
        let mut ndc_space_coords = self
            .world_to_ndc(camera_transform, world_position)
            .ok_or(ViewportConversionError::InvalidData)?;
        // NDC z-values outside of 0 < z < 1 are outside the (implicit) camera frustum and are thus not in viewport-space
        if ndc_space_coords.z < 0.0 {
            return Err(ViewportConversionError::PastNearPlane);
        }
        if ndc_space_coords.z > 1.0 {
            return Err(ViewportConversionError::PastFarPlane);
        }

        // Convert the NDC depth to view-space depth via the near plane, negating the
        // result so that the returned depth is positive.
        let depth = -self.depth_ndc_to_view_z(ndc_space_coords.z);

        // Flip the Y co-ordinate origin from the bottom to the top.
        ndc_space_coords.y = -ndc_space_coords.y;

        // Once in NDC space, we can discard the z element and map x/y to the viewport rect
        let viewport_position =
            (ndc_space_coords.truncate() + Vec2::ONE) / 2.0 * target_rect.size() + target_rect.min;
        Ok(viewport_position.extend(depth))
    }

    /// Returns a ray originating from the camera that passes through everything beyond `viewport_position`.
    ///
    /// The resulting ray starts on the near plane of the camera.
    ///
    /// If the camera's projection is orthographic the direction of the ray is always equal to `camera_transform.forward()`.
    ///
    /// To get the world space coordinates with Normalized Device Coordinates, you should use
    /// [`ndc_to_world`](Self::ndc_to_world).
    ///
    /// # Panics
    ///
    /// Will panic if the camera's projection matrix is invalid (has a determinant of 0) and
    /// `glam_assert` is enabled (see [`ndc_to_world`](Self::ndc_to_world)).
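    ///
    /// A sketch of cursor picking, assuming a single window and a single camera:
    /// ```
    /// use bevy_ecs::prelude::*;
    /// use bevy_render::camera::Camera;
    /// use bevy_transform::components::GlobalTransform;
    /// use bevy_window::Window;
    ///
    /// fn cursor_ray(windows: Query<&Window>, cameras: Query<(&Camera, &GlobalTransform)>) {
    ///     let (Ok(window), Ok((camera, transform))) = (windows.single(), cameras.single())
    ///     else {
    ///         return;
    ///     };
    ///     if let Some(cursor_position) = window.cursor_position() {
    ///         if let Ok(ray) = camera.viewport_to_world(transform, cursor_position) {
    ///             // `ray.origin` lies on the near plane; `ray.direction` points into the scene.
    ///         }
    ///     }
    /// }
    /// ```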
    pub fn viewport_to_world(
        &self,
        camera_transform: &GlobalTransform,
        viewport_position: Vec2,
    ) -> Result<Ray3d, ViewportConversionError> {
        let target_rect = self
            .logical_viewport_rect()
            .ok_or(ViewportConversionError::NoViewportSize)?;
        let mut rect_relative = (viewport_position - target_rect.min) / target_rect.size();
        // Flip the Y co-ordinate origin from the top to the bottom.
        rect_relative.y = 1.0 - rect_relative.y;

        let ndc = rect_relative * 2. - Vec2::ONE;
        let ndc_to_world =
            camera_transform.compute_matrix() * self.computed.clip_from_view.inverse();
        let world_near_plane = ndc_to_world.project_point3(ndc.extend(1.));
        // Using EPSILON because an ndc with Z = 0 returns NaNs.
        let world_far_plane = ndc_to_world.project_point3(ndc.extend(f32::EPSILON));

        // The fallible direction constructor ensures that world_near_plane and world_far_plane aren't NaN.
        Dir3::new(world_far_plane - world_near_plane)
            .map_err(|_| ViewportConversionError::InvalidData)
            .map(|direction| Ray3d {
                origin: world_near_plane,
                direction,
            })
    }

    /// Returns a 2D world position computed from a position on this [`Camera`]'s viewport.
    ///
    /// Useful for 2D cameras and other cameras with an orthographic projection pointing along the Z axis.
    ///
    /// To get the world space coordinates with Normalized Device Coordinates, you should use
    /// [`ndc_to_world`](Self::ndc_to_world).
    ///
    /// # Panics
    ///
    /// Will panic if the camera's projection matrix is invalid (has a determinant of 0) and
    /// `glam_assert` is enabled (see [`ndc_to_world`](Self::ndc_to_world)).
    pub fn viewport_to_world_2d(
        &self,
        camera_transform: &GlobalTransform,
        viewport_position: Vec2,
    ) -> Result<Vec2, ViewportConversionError> {
        let target_rect = self
            .logical_viewport_rect()
            .ok_or(ViewportConversionError::NoViewportSize)?;
        let mut rect_relative = (viewport_position - target_rect.min) / target_rect.size();

        // Flip the Y co-ordinate origin from the top to the bottom.
        rect_relative.y = 1.0 - rect_relative.y;

        let ndc = rect_relative * 2. - Vec2::ONE;

        let world_near_plane = self
            .ndc_to_world(camera_transform, ndc.extend(1.))
            .ok_or(ViewportConversionError::InvalidData)?;

        Ok(world_near_plane.truncate())
    }

    /// Given a position in world space, use the camera's viewport to compute the Normalized Device Coordinates.
    ///
    /// When the position is within the viewport the values returned will be between -1.0 and 1.0 on the X and Y axes,
    /// and between 0.0 and 1.0 on the Z axis.
    /// To get the coordinates in the render target's viewport dimensions, you should use
    /// [`world_to_viewport`](Self::world_to_viewport).
    ///
    /// Returns `None` if the `camera_transform`, the `world_position`, or the projection matrix defined by [`CameraProjection`] contain `NAN`.
    ///
    /// # Panics
    ///
    /// Will panic if the `camera_transform` contains `NAN` and the `glam_assert` feature is enabled.
    pub fn world_to_ndc(
        &self,
        camera_transform: &GlobalTransform,
        world_position: Vec3,
    ) -> Option<Vec3> {
        // Build a transformation matrix to convert from world space to NDC using camera data
        let clip_from_world: Mat4 =
            self.computed.clip_from_view * camera_transform.compute_matrix().inverse();
        let ndc_space_coords: Vec3 = clip_from_world.project_point3(world_position);

        (!ndc_space_coords.is_nan()).then_some(ndc_space_coords)
    }

    /// Given a position in Normalized Device Coordinates,
    /// use the camera's viewport to compute the world space position.
    ///
    /// For positions within the viewport, the given NDC coordinates lie between -1.0 and 1.0 on the X and Y axes,
    /// and between 0.0 and 1.0 on the Z axis.
    /// To compute a world space position from a viewport position, you should use
    /// [`viewport_to_world`](Self::viewport_to_world).
    ///
    /// Returns `None` if the `camera_transform`, the given `ndc` position, or the projection matrix defined by [`CameraProjection`] contain `NAN`.
    ///
    /// # Panics
    ///
    /// Will panic if the projection matrix is invalid (has a determinant of 0) and `glam_assert` is enabled.
    pub fn ndc_to_world(&self, camera_transform: &GlobalTransform, ndc: Vec3) -> Option<Vec3> {
        // Build a transformation matrix to convert from NDC to world space using camera data
        let ndc_to_world =
            camera_transform.compute_matrix() * self.computed.clip_from_view.inverse();

        let world_space_coords = ndc_to_world.project_point3(ndc);

        (!world_space_coords.is_nan()).then_some(world_space_coords)
    }

    /// Converts the depth in Normalized Device Coordinates
    /// to linear view z for perspective projections.
    ///
    /// Note: Depth values in front of the camera will be negative as -z is forward
    pub fn depth_ndc_to_view_z(&self, ndc_depth: f32) -> f32 {
        let near = self.clip_from_view().w_axis.z; // [3][2]
        -near / ndc_depth
    }

    /// Converts the depth in Normalized Device Coordinates
    /// to linear view z for orthographic projections.
    ///
    /// Note: Depth values in front of the camera will be negative as -z is forward
    pub fn depth_ndc_to_view_z_2d(&self, ndc_depth: f32) -> f32 {
        -(self.clip_from_view().w_axis.z - ndc_depth) / self.clip_from_view().z_axis.z
        //                       [3][2]                                         [2][2]
    }
}

/// Control how this camera outputs once rendering is completed.
#[derive(Debug, Clone, Copy)]
pub enum CameraOutputMode {
    /// Writes the camera output to the configured render target.
    Write {
        /// The blend state that will be used by the pipeline that writes the intermediate render textures to the final render target texture.
        blend_state: Option<BlendState>,
        /// The clear color operation to perform on the final render target texture.
        clear_color: ClearColorConfig,
    },
    /// Skips writing the camera output to the configured render target. The output will remain in the
    /// render target's "intermediate" textures, which a camera with a higher order should then write to the final
    /// render target using [`CameraOutputMode::Write`]. The "skip" mode can easily prevent render results from being
    /// displayed, or cause them to be lost. Only use this if you know what you are doing!
    /// In camera setups with multiple active cameras rendering to the same [`RenderTarget`], the `Skip` mode can be used
    /// to remove unnecessary / redundant writes to the final output texture, removing unnecessary render passes.
    Skip,
}

impl Default for CameraOutputMode {
    fn default() -> Self {
        CameraOutputMode::Write {
            blend_state: None,
            clear_color: ClearColorConfig::Default,
        }
    }
}

/// Configures the [`RenderGraph`](crate::render_graph::RenderGraph) that will be run for a given [`Camera`] entity.
#[derive(Component, Debug, Deref, DerefMut, Reflect, Clone)]
#[reflect(opaque)]
#[reflect(Component, Debug, Clone)]
pub struct CameraRenderGraph(InternedRenderSubGraph);

impl CameraRenderGraph {
    /// Creates a new [`CameraRenderGraph`] from any [`RenderSubGraph`].
    #[inline]
    pub fn new<T: RenderSubGraph>(name: T) -> Self {
        Self(name.intern())
    }

    /// Sets the graph name.
    #[inline]
    pub fn set<T: RenderSubGraph>(&mut self, name: T) {
        self.0 = name.intern();
    }
}

/// The "target" that a [`Camera`] will render to. For example, this could be a [`Window`]
/// swapchain or an [`Image`].
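///
/// A `Handle<Image>` converts directly into an image target. A minimal sketch,
/// assuming an image handle is already available:
/// ```
/// use bevy_asset::Handle;
/// use bevy_image::Image;
/// use bevy_render::camera::RenderTarget;
///
/// let image_handle: Handle<Image> = Handle::default();
/// // Render to an offscreen image instead of the primary window.
/// let target = RenderTarget::from(image_handle);
/// ```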
#[derive(Debug, Clone, Reflect, From)]
#[reflect(Clone)]
pub enum RenderTarget {
    /// Window to which the camera's view is rendered.
    Window(WindowRef),
    /// Image to which the camera's view is rendered.
    Image(ImageRenderTarget),
    /// Texture View to which the camera's view is rendered.
    /// Useful when the texture view needs to be created outside of Bevy, for example OpenXR.
    TextureView(ManualTextureViewHandle),
}

/// A render target that renders to an [`Image`].
#[derive(Debug, Clone, Reflect, PartialEq, Eq, Hash, PartialOrd, Ord)]
#[reflect(Clone, PartialEq, Hash)]
pub struct ImageRenderTarget {
    /// The image to render to.
    pub handle: Handle<Image>,
    /// The scale factor of the render target image, corresponding to the scale
    /// factor for a window target. This should almost always be 1.0.
    pub scale_factor: FloatOrd,
}

impl From<Handle<Image>> for RenderTarget {
    fn from(handle: Handle<Image>) -> Self {
        Self::Image(handle.into())
    }
}

impl From<Handle<Image>> for ImageRenderTarget {
    fn from(handle: Handle<Image>) -> Self {
        Self {
            handle,
            scale_factor: FloatOrd(1.0),
        }
    }
}

impl Default for RenderTarget {
    fn default() -> Self {
        Self::Window(Default::default())
    }
}

/// Normalized version of the render target.
///
/// Once we have this we shouldn't need to resolve it down anymore.
#[derive(Debug, Clone, Reflect, PartialEq, Eq, Hash, PartialOrd, Ord, From)]
#[reflect(Clone, PartialEq, Hash)]
pub enum NormalizedRenderTarget {
    /// Window to which the camera's view is rendered.
    Window(NormalizedWindowRef),
    /// Image to which the camera's view is rendered.
    Image(ImageRenderTarget),
    /// Texture View to which the camera's view is rendered.
    /// Useful when the texture view needs to be created outside of Bevy, for example OpenXR.
    TextureView(ManualTextureViewHandle),
}

impl RenderTarget {
    /// Normalize the render target down to a more concrete value, mostly used for equality comparisons.
    pub fn normalize(&self, primary_window: Option<Entity>) -> Option<NormalizedRenderTarget> {
        match self {
            RenderTarget::Window(window_ref) => window_ref
                .normalize(primary_window)
                .map(NormalizedRenderTarget::Window),
            RenderTarget::Image(handle) => Some(NormalizedRenderTarget::Image(handle.clone())),
            RenderTarget::TextureView(id) => Some(NormalizedRenderTarget::TextureView(*id)),
        }
    }

    /// Get a handle to the render target's image,
    /// or `None` if the render target is another variant.
    pub fn as_image(&self) -> Option<&Handle<Image>> {
        if let Self::Image(image_target) = self {
            Some(&image_target.handle)
        } else {
            None
        }
    }
}

impl NormalizedRenderTarget {
    /// Retrieves the [`TextureView`] of this render target, if it exists.
    pub fn get_texture_view<'a>(
        &self,
        windows: &'a ExtractedWindows,
        images: &'a RenderAssets<GpuImage>,
        manual_texture_views: &'a ManualTextureViews,
    ) -> Option<&'a TextureView> {
        match self {
            NormalizedRenderTarget::Window(window_ref) => windows
                .get(&window_ref.entity())
                .and_then(|window| window.swap_chain_texture_view.as_ref()),
            NormalizedRenderTarget::Image(image_target) => images
                .get(&image_target.handle)
                .map(|image| &image.texture_view),
            NormalizedRenderTarget::TextureView(id) => {
                manual_texture_views.get(id).map(|tex| &tex.texture_view)
            }
        }
    }

    /// Retrieves the [`TextureFormat`] of this render target, if it exists.
    pub fn get_texture_format<'a>(
        &self,
        windows: &'a ExtractedWindows,
        images: &'a RenderAssets<GpuImage>,
        manual_texture_views: &'a ManualTextureViews,
    ) -> Option<TextureFormat> {
        match self {
            NormalizedRenderTarget::Window(window_ref) => windows
                .get(&window_ref.entity())
                .and_then(|window| window.swap_chain_texture_format),
            NormalizedRenderTarget::Image(image_target) => images
                .get(&image_target.handle)
                .map(|image| image.texture_format),
            NormalizedRenderTarget::TextureView(id) => {
                manual_texture_views.get(id).map(|tex| tex.format)
            }
        }
    }

    /// Retrieves the [`RenderTargetInfo`] of this render target, if it exists.
    pub fn get_render_target_info<'a>(
        &self,
        resolutions: impl IntoIterator<Item = (Entity, &'a Window)>,
        images: &Assets<Image>,
        manual_texture_views: &ManualTextureViews,
    ) -> Option<RenderTargetInfo> {
        match self {
            NormalizedRenderTarget::Window(window_ref) => resolutions
                .into_iter()
                .find(|(entity, _)| *entity == window_ref.entity())
                .map(|(_, window)| RenderTargetInfo {
                    physical_size: window.physical_size(),
                    scale_factor: window.resolution.scale_factor(),
                }),
            NormalizedRenderTarget::Image(image_target) => {
                let image = images.get(&image_target.handle)?;
                Some(RenderTargetInfo {
                    physical_size: image.size(),
                    scale_factor: image_target.scale_factor.0,
                })
            }
            NormalizedRenderTarget::TextureView(id) => {
                manual_texture_views.get(id).map(|tex| RenderTargetInfo {
                    physical_size: tex.size,
                    scale_factor: 1.0,
                })
            }
        }
    }

    // Check if this render target is contained in the given changed windows or images.
    fn is_changed(
        &self,
        changed_window_ids: &HashSet<Entity>,
        changed_image_handles: &HashSet<&AssetId<Image>>,
    ) -> bool {
        match self {
            NormalizedRenderTarget::Window(window_ref) => {
                changed_window_ids.contains(&window_ref.entity())
            }
            NormalizedRenderTarget::Image(image_target) => {
                changed_image_handles.contains(&image_target.handle.id())
            }
            NormalizedRenderTarget::TextureView(_) => true,
        }
    }
}

/// System in charge of updating a [`Camera`] when its window or projection changes.
///
/// The system detects window creation, resize, and scale factor change events to update the camera
/// [`Projection`] if needed.
///
/// ## World Resources
///
/// [`Res<Assets<Image>>`](Assets<Image>) -- For cameras that render to an image, this resource is used to
/// inspect information about the render target. This system will not access any other image assets.
///
/// [`OrthographicProjection`]: crate::camera::OrthographicProjection
/// [`PerspectiveProjection`]: crate::camera::PerspectiveProjection
pub fn camera_system(
    mut window_resized_events: EventReader<WindowResized>,
    mut window_created_events: EventReader<WindowCreated>,
    mut window_scale_factor_changed_events: EventReader<WindowScaleFactorChanged>,
    mut image_asset_events: EventReader<AssetEvent<Image>>,
    primary_window: Query<Entity, With<PrimaryWindow>>,
    windows: Query<(Entity, &Window)>,
    images: Res<Assets<Image>>,
    manual_texture_views: Res<ManualTextureViews>,
    mut cameras: Query<(&mut Camera, &mut Projection)>,
) {
    let primary_window = primary_window.iter().next();

    let mut changed_window_ids = <HashSet<_>>::default();
    changed_window_ids.extend(window_created_events.read().map(|event| event.window));
    changed_window_ids.extend(window_resized_events.read().map(|event| event.window));
    let scale_factor_changed_window_ids: HashSet<_> = window_scale_factor_changed_events
        .read()
        .map(|event| event.window)
        .collect();
    changed_window_ids.extend(scale_factor_changed_window_ids.clone());

    let changed_image_handles: HashSet<&AssetId<Image>> = image_asset_events
        .read()
        .filter_map(|event| match event {
            AssetEvent::Modified { id } | AssetEvent::Added { id } => Some(id),
            _ => None,
        })
        .collect();

    for (mut camera, mut camera_projection) in &mut cameras {
        let mut viewport_size = camera
            .viewport
            .as_ref()
            .map(|viewport| viewport.physical_size);

        if let Some(normalized_target) = camera.target.normalize(primary_window) {
            if normalized_target.is_changed(&changed_window_ids, &changed_image_handles)
                || camera.is_added()
                || camera_projection.is_changed()
                || camera.computed.old_viewport_size != viewport_size
                || camera.computed.old_sub_camera_view != camera.sub_camera_view
            {
                let new_computed_target_info = normalized_target.get_render_target_info(
                    windows,
                    &images,
                    &manual_texture_views,
                );
                // Check for the scale factor changing, and resize the viewport if needed.
                // This can happen when the window is moved between monitors with different DPIs.
                // Without this, the viewport will take a smaller portion of the window moved to
                // a higher DPI monitor.
                if normalized_target
                    .is_changed(&scale_factor_changed_window_ids, &HashSet::default())
                {
                    if let (Some(new_scale_factor), Some(old_scale_factor)) = (
                        new_computed_target_info
                            .as_ref()
                            .map(|info| info.scale_factor),
                        camera
                            .computed
                            .target_info
                            .as_ref()
                            .map(|info| info.scale_factor),
                    ) {
                        let resize_factor = new_scale_factor / old_scale_factor;
                        if let Some(ref mut viewport) = camera.viewport {
                            let resize = |vec: UVec2| (vec.as_vec2() * resize_factor).as_uvec2();
                            viewport.physical_position = resize(viewport.physical_position);
                            viewport.physical_size = resize(viewport.physical_size);
                            viewport_size = Some(viewport.physical_size);
                        }
                    }
                }
                // This check is needed because when changing WindowMode to Fullscreen, the viewport may have invalid
                // arguments due to a sudden change on the window size to a lower value.
                // If the size of the window is lower, the viewport will match that lower value.
                if let Some(viewport) = &mut camera.viewport {
                    let target_info = &new_computed_target_info;
                    if let Some(target) = target_info {
                        viewport.clamp_to_size(target.physical_size);
                    }
                }
                camera.computed.target_info = new_computed_target_info;
                if let Some(size) = camera.logical_viewport_size() {
                    if size.x != 0.0 && size.y != 0.0 {
                        camera_projection.update(size.x, size.y);
                        camera.computed.clip_from_view = match &camera.sub_camera_view {
                            Some(sub_view) => {
                                camera_projection.get_clip_from_view_for_sub(sub_view)
                            }
                            None => camera_projection.get_clip_from_view(),
                        }
                    }
                }
            }
        }

        if camera.computed.old_viewport_size != viewport_size {
            camera.computed.old_viewport_size = viewport_size;
        }

        if camera.computed.old_sub_camera_view != camera.sub_camera_view {
            camera.computed.old_sub_camera_view = camera.sub_camera_view;
        }
    }
}

/// This component lets you control the [`TextureUsages`] field of the main texture generated for the camera.
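///
/// A sketch of opting into an extra usage via the public tuple field:
/// ```
/// use bevy_render::camera::CameraMainTextureUsages;
/// use wgpu::TextureUsages;
///
/// let mut usages = CameraMainTextureUsages::default();
/// usages.0 |= TextureUsages::COPY_DST;
/// ```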
#[derive(Component, ExtractComponent, Clone, Copy, Reflect)]
#[reflect(opaque)]
#[reflect(Component, Default, Clone)]
pub struct CameraMainTextureUsages(pub TextureUsages);

impl Default for CameraMainTextureUsages {
    fn default() -> Self {
        Self(
            TextureUsages::RENDER_ATTACHMENT
                | TextureUsages::TEXTURE_BINDING
                | TextureUsages::COPY_SRC,
        )
    }
}

/// The render-world representation of a [`Camera`], extracted by [`extract_cameras`].
#[derive(Component, Debug)]
pub struct ExtractedCamera {
    pub target: Option<NormalizedRenderTarget>,
    pub physical_viewport_size: Option<UVec2>,
    pub physical_target_size: Option<UVec2>,
    pub viewport: Option<Viewport>,
    pub render_graph: InternedRenderSubGraph,
    pub order: isize,
    pub output_mode: CameraOutputMode,
    pub msaa_writeback: bool,
    pub clear_color: ClearColorConfig,
    pub sorted_camera_index_for_target: usize,
    pub exposure: f32,
    pub hdr: bool,
}

/// Extracts all active [`Camera`]s into the render world.
pub fn extract_cameras(
    mut commands: Commands,
    query: Extract<
        Query<(
            Entity,
            RenderEntity,
            &Camera,
            &CameraRenderGraph,
            &GlobalTransform,
            &VisibleEntities,
            &Frustum,
            Option<&ColorGrading>,
            Option<&Exposure>,
            Option<&TemporalJitter>,
            Option<&RenderLayers>,
            Option<&Projection>,
            Has<NoIndirectDrawing>,
        )>,
    >,
    primary_window: Extract<Query<Entity, With<PrimaryWindow>>>,
    gpu_preprocessing_support: Res<GpuPreprocessingSupport>,
    mapper: Extract<Query<&RenderEntity>>,
) {
    let primary_window = primary_window.iter().next();
    for (
        main_entity,
        render_entity,
        camera,
        camera_render_graph,
        transform,
        visible_entities,
        frustum,
        color_grading,
        exposure,
        temporal_jitter,
        render_layers,
        projection,
        no_indirect_drawing,
    ) in query.iter()
    {
        if !camera.is_active {
            commands.entity(render_entity).remove::<(
                ExtractedCamera,
                ExtractedView,
                RenderVisibleEntities,
                TemporalJitter,
                RenderLayers,
                Projection,
                NoIndirectDrawing,
                ViewUniformOffset,
            )>();
            continue;
        }

        let color_grading = color_grading.unwrap_or(&ColorGrading::default()).clone();

        if let (
            Some(URect {
                min: viewport_origin,
                ..
            }),
            Some(viewport_size),
            Some(target_size),
        ) = (
            camera.physical_viewport_rect(),
            camera.physical_viewport_size(),
            camera.physical_target_size(),
        ) {
            if target_size.x == 0 || target_size.y == 0 {
                continue;
            }

            let render_visible_entities = RenderVisibleEntities {
                entities: visible_entities
                    .entities
                    .iter()
                    .map(|(type_id, entities)| {
                        let entities = entities
                            .iter()
                            .map(|entity| {
                                let render_entity = mapper
                                    .get(*entity)
                                    .cloned()
                                    .map(|entity| entity.id())
                                    .unwrap_or(Entity::PLACEHOLDER);
                                (render_entity, (*entity).into())
                            })
                            .collect();
                        (*type_id, entities)
                    })
                    .collect(),
            };

            let mut commands = commands.entity(render_entity);
            commands.insert((
                ExtractedCamera {
                    target: camera.target.normalize(primary_window),
                    viewport: camera.viewport.clone(),
                    physical_viewport_size: Some(viewport_size),
                    physical_target_size: Some(target_size),
                    render_graph: camera_render_graph.0,
                    order: camera.order,
                    output_mode: camera.output_mode,
                    msaa_writeback: camera.msaa_writeback,
                    clear_color: camera.clear_color,
                    // this will be set in sort_cameras
                    sorted_camera_index_for_target: 0,
                    exposure: exposure
                        .map(Exposure::exposure)
                        .unwrap_or_else(|| Exposure::default().exposure()),
                    hdr: camera.hdr,
                },
                ExtractedView {
                    retained_view_entity: RetainedViewEntity::new(main_entity.into(), None, 0),
                    clip_from_view: camera.clip_from_view(),
                    world_from_view: *transform,
                    clip_from_world: None,
                    hdr: camera.hdr,
                    viewport: UVec4::new(
                        viewport_origin.x,
                        viewport_origin.y,
                        viewport_size.x,
                        viewport_size.y,
                    ),
                    color_grading,
                },
                render_visible_entities,
                *frustum,
            ));

            if let Some(temporal_jitter) = temporal_jitter {
                commands.insert(temporal_jitter.clone());
            }

            if let Some(render_layers) = render_layers {
                commands.insert(render_layers.clone());
            }

            if let Some(perspective) = projection {
                commands.insert(perspective.clone());
            }

            if no_indirect_drawing
                || !matches!(
                    gpu_preprocessing_support.max_supported_mode,
                    GpuPreprocessingMode::Culling
                )
            {
                commands.insert(NoIndirectDrawing);
            }
        };
    }
}

/// Cameras sorted by their order field. This is updated in the [`sort_cameras`] system.
#[derive(Resource, Default)]
pub struct SortedCameras(pub Vec<SortedCamera>);

/// A single [`Camera`] entry in [`SortedCameras`].
pub struct SortedCamera {
    pub entity: Entity,
    pub order: isize,
    pub target: Option<NormalizedRenderTarget>,
    pub hdr: bool,
}

/// Sorts cameras by their order, and assigns each camera its index among the cameras sharing its render target.
pub fn sort_cameras(
    mut sorted_cameras: ResMut<SortedCameras>,
    mut cameras: Query<(Entity, &mut ExtractedCamera)>,
) {
    sorted_cameras.0.clear();
    for (entity, camera) in cameras.iter() {
        sorted_cameras.0.push(SortedCamera {
            entity,
            order: camera.order,
            target: camera.target.clone(),
            hdr: camera.hdr,
        });
    }
    // sort by order and ensure within an order, RenderTargets of the same type are packed together
    sorted_cameras
        .0
        .sort_by(|c1, c2| (c1.order, &c1.target).cmp(&(c2.order, &c2.target)));
    let mut previous_order_target = None;
    let mut ambiguities = <HashSet<_>>::default();
    let mut target_counts = <HashMap<_, _>>::default();
    for sorted_camera in &mut sorted_cameras.0 {
        let new_order_target = (sorted_camera.order, sorted_camera.target.clone());
        if let Some(previous_order_target) = previous_order_target {
            if previous_order_target == new_order_target {
                ambiguities.insert(new_order_target.clone());
            }
        }
        if let Some(target) = &sorted_camera.target {
            let count = target_counts
                .entry((target.clone(), sorted_camera.hdr))
                .or_insert(0usize);
            let (_, mut camera) = cameras.get_mut(sorted_camera.entity).unwrap();
            camera.sorted_camera_index_for_target = *count;
            *count += 1;
        }
        previous_order_target = Some(new_order_target);
    }

    if !ambiguities.is_empty() {
        warn!(
            "Camera order ambiguities detected for active cameras with the following priorities: {:?}. \
            To fix this, ensure there is exactly one Camera entity spawned with a given order for a given RenderTarget. \
            Ambiguities should be resolved because either (1) multiple active cameras were spawned accidentally, which will \
            result in rendering multiple instances of the scene or (2) for cases where having multiple active cameras is intentional, \
            ambiguities could result in unpredictable render results.",
            ambiguities
        );
    }
}

/// A subpixel offset to jitter a perspective camera's frustum by.
///
/// Useful for temporal rendering techniques.
///
/// Do not use with [`OrthographicProjection`].
///
/// [`OrthographicProjection`]: crate::camera::OrthographicProjection
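///
/// A minimal sketch of applying the jitter to a perspective projection matrix:
/// ```
/// use bevy_math::{Mat4, Vec2};
/// use bevy_render::camera::TemporalJitter;
///
/// let jitter = TemporalJitter { offset: Vec2::new(0.25, -0.25) };
/// let mut clip_from_view = Mat4::perspective_infinite_reverse_rh(1.0, 1.0, 0.1);
/// jitter.jitter_projection(&mut clip_from_view, Vec2::new(1920.0, 1080.0));
/// ```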
#[derive(Component, Clone, Default, Reflect)]
#[reflect(Default, Component, Clone)]
pub struct TemporalJitter {
    /// Offset is in range [-0.5, 0.5].
    pub offset: Vec2,
}

impl TemporalJitter {
    /// Applies this jitter to a perspective projection matrix, offsetting it by a subpixel amount.
    pub fn jitter_projection(&self, clip_from_view: &mut Mat4, view_size: Vec2) {
        if clip_from_view.w_axis.w == 1.0 {
            warn!(
                "TemporalJitter not supported with OrthographicProjection. Use PerspectiveProjection instead."
            );
            return;
        }

        // https://github.com/GPUOpen-LibrariesAndSDKs/FidelityFX-SDK/blob/d7531ae47d8b36a5d4025663e731a47a38be882f/docs/techniques/media/super-resolution-temporal/jitter-space.svg
        let jitter = (self.offset * vec2(2.0, -2.0)) / view_size;

        clip_from_view.z_axis.x += jitter.x;
        clip_from_view.z_axis.y += jitter.y;
    }
}

/// Camera component specifying a mip bias to apply when sampling from material textures.
///
/// Often used in conjunction with antialiasing post-process effects to reduce texture blurriness.
#[derive(Default, Component, Reflect)]
#[reflect(Default, Component)]
pub struct MipBias(pub f32);