bevy_render/camera/
camera.rs

use super::{ClearColorConfig, Projection};
use crate::{
    batching::gpu_preprocessing::GpuPreprocessingSupport,
    camera::{CameraProjection, ManualTextureViewHandle, ManualTextureViews},
    primitives::Frustum,
    render_asset::RenderAssets,
    render_graph::{InternedRenderSubGraph, RenderSubGraph},
    render_resource::TextureView,
    sync_world::TemporaryRenderEntity,
    sync_world::{RenderEntity, SyncToRenderWorld},
    texture::GpuImage,
    view::{
        ColorGrading, ExtractedView, ExtractedWindows, GpuCulling, Msaa, RenderLayers,
        RenderVisibleEntities, ViewUniformOffset, Visibility, VisibleEntities,
    },
    Extract,
};
use bevy_asset::{AssetEvent, AssetId, Assets, Handle};
use bevy_derive::{Deref, DerefMut};
use bevy_ecs::{
    change_detection::DetectChanges,
    component::{Component, ComponentId},
    entity::Entity,
    event::EventReader,
    prelude::With,
    query::Has,
    reflect::ReflectComponent,
    system::{Commands, Query, Res, ResMut, Resource},
    world::DeferredWorld,
};
use bevy_image::Image;
use bevy_math::{ops, vec2, Dir3, Mat4, Ray3d, Rect, URect, UVec2, UVec4, Vec2, Vec3};
use bevy_reflect::prelude::*;
use bevy_render_macros::ExtractComponent;
use bevy_transform::components::{GlobalTransform, Transform};
use bevy_utils::{tracing::warn, warn_once, HashMap, HashSet};
use bevy_window::{
    NormalizedWindowRef, PrimaryWindow, Window, WindowCreated, WindowRef, WindowResized,
    WindowScaleFactorChanged,
};
use core::ops::Range;
use derive_more::derive::From;
use wgpu::{BlendState, TextureFormat, TextureUsages};

/// Render viewport configuration for the [`Camera`] component.
///
/// The viewport defines the area on the render target to which the camera renders its image.
/// You can overlay multiple cameras in a single window using viewports to create effects like
/// split screen, minimaps, and character viewers.
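///
/// A minimal sketch of a custom viewport (the values are illustrative: the left
/// half of a 1920x1080 render target, e.g. for two-player split screen):
/// ```
/// use bevy_math::UVec2;
/// use bevy_render::camera::Viewport;
///
/// let viewport = Viewport {
///     physical_position: UVec2::new(0, 0),
///     physical_size: UVec2::new(960, 1080),
///     // Keep the default depth range of 0.0..1.0.
///     ..Default::default()
/// };
/// ```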
#[derive(Reflect, Debug, Clone)]
#[reflect(Default)]
pub struct Viewport {
    /// The physical position to render this viewport to within the [`RenderTarget`] of this [`Camera`].
    /// (0,0) corresponds to the top-left corner.
    pub physical_position: UVec2,
    /// The physical size of the viewport rectangle to render to within the [`RenderTarget`] of this [`Camera`].
    /// The origin of the rectangle is in the top-left corner.
    pub physical_size: UVec2,
    /// The minimum and maximum depth to render (on a scale from 0.0 to 1.0).
    pub depth: Range<f32>,
}

impl Default for Viewport {
    fn default() -> Self {
        Self {
            physical_position: Default::default(),
            physical_size: UVec2::new(1, 1),
            depth: 0.0..1.0,
        }
    }
}

/// Settings to define a camera sub view.
///
/// When [`Camera::sub_camera_view`] is `Some`, only the sub-section of the
/// image defined by `size` and `offset` (relative to the `full_size` of the
/// whole image) is projected to the camera's viewport.
///
/// Take the example of the following multi-monitor setup:
/// ```css
/// ┌───┬───┐
/// │ A │ B │
/// ├───┼───┤
/// │ C │ D │
/// └───┴───┘
/// ```
/// If each monitor is 1920x1080, the whole image will have a resolution of
/// 3840x2160. For each monitor we can use a single camera with a viewport of
/// the same size as the monitor it corresponds to. To ensure that the image is
/// cohesive, we can use a different sub view on each camera:
/// - Camera A: `full_size` = 3840x2160, `size` = 1920x1080, `offset` = 0,0
/// - Camera B: `full_size` = 3840x2160, `size` = 1920x1080, `offset` = 1920,0
/// - Camera C: `full_size` = 3840x2160, `size` = 1920x1080, `offset` = 0,1080
/// - Camera D: `full_size` = 3840x2160, `size` = 1920x1080, `offset` =
///   1920,1080
///
/// However, since only the ratio between the values matters, they could all be
/// divided by 120 and still produce the same image. Camera D, for example,
/// would have the following values:
/// `full_size` = 32x18, `size` = 16x9, `offset` = 16,9
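///
/// A sketch of camera D from the example above, using the reduced values:
/// ```
/// use bevy_math::{UVec2, Vec2};
/// use bevy_render::camera::{Camera, SubCameraView};
///
/// let camera_d = Camera {
///     sub_camera_view: Some(SubCameraView {
///         full_size: UVec2::new(32, 18),
///         offset: Vec2::new(16., 9.),
///         size: UVec2::new(16, 9),
///     }),
///     ..Default::default()
/// };
/// ```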
#[derive(Debug, Clone, Copy, Reflect, PartialEq)]
pub struct SubCameraView {
    /// Size of the entire camera view
    pub full_size: UVec2,
    /// Offset of the sub camera
    pub offset: Vec2,
    /// Size of the sub camera
    pub size: UVec2,
}

impl Default for SubCameraView {
    fn default() -> Self {
        Self {
            full_size: UVec2::new(1, 1),
            offset: Vec2::new(0., 0.),
            size: UVec2::new(1, 1),
        }
    }
}

/// Information about the current [`RenderTarget`].
#[derive(Default, Debug, Clone)]
pub struct RenderTargetInfo {
    /// The physical size of this render target (in physical pixels, ignoring scale factor).
    pub physical_size: UVec2,
    /// The scale factor of this render target.
    ///
    /// When rendering to a window, this is typically a value greater than or equal to 1.0,
    /// representing the ratio between the size of the window in physical pixels and its logical size.
    pub scale_factor: f32,
}

/// Holds internally computed [`Camera`] values.
#[derive(Default, Debug, Clone)]
pub struct ComputedCameraValues {
    clip_from_view: Mat4,
    target_info: Option<RenderTargetInfo>,
    // size of the `Viewport`
    old_viewport_size: Option<UVec2>,
    old_sub_camera_view: Option<SubCameraView>,
}

/// How much energy a `Camera3d` absorbs from incoming light.
///
/// <https://en.wikipedia.org/wiki/Exposure_(photography)>
#[derive(Component, Clone, Copy, Reflect)]
#[reflect(opaque)]
#[reflect(Component, Default)]
pub struct Exposure {
    /// <https://en.wikipedia.org/wiki/Exposure_value#Tabulated_exposure_values>
    pub ev100: f32,
}

impl Exposure {
    pub const SUNLIGHT: Self = Self {
        ev100: Self::EV100_SUNLIGHT,
    };
    pub const OVERCAST: Self = Self {
        ev100: Self::EV100_OVERCAST,
    };
    pub const INDOOR: Self = Self {
        ev100: Self::EV100_INDOOR,
    };
    /// This value was calibrated to match Blender's implicit/default exposure as closely as possible.
    /// It also happens to be a reasonable default.
    ///
    /// See <https://github.com/bevyengine/bevy/issues/11577> for details.
    pub const BLENDER: Self = Self {
        ev100: Self::EV100_BLENDER,
    };

    pub const EV100_SUNLIGHT: f32 = 15.0;
    pub const EV100_OVERCAST: f32 = 12.0;
    pub const EV100_INDOOR: f32 = 7.0;

    /// This value was calibrated to match Blender's implicit/default exposure as closely as possible.
    /// It also happens to be a reasonable default.
    ///
    /// See <https://github.com/bevyengine/bevy/issues/11577> for details.
    pub const EV100_BLENDER: f32 = 9.7;

    pub fn from_physical_camera(physical_camera_parameters: PhysicalCameraParameters) -> Self {
        Self {
            ev100: physical_camera_parameters.ev100(),
        }
    }

    /// Converts EV100 values to exposure values.
    /// <https://google.github.io/filament/Filament.md.html#imagingpipeline/physicallybasedcamera/exposure>
    #[inline]
    pub fn exposure(&self) -> f32 {
        ops::exp2(-self.ev100) / 1.2
    }
}

impl Default for Exposure {
    fn default() -> Self {
        Self::BLENDER
    }
}

/// Parameters based on physical camera characteristics for calculating EV100
/// values for use with [`Exposure`]. This is also used for depth of field.
#[derive(Clone, Copy)]
pub struct PhysicalCameraParameters {
    /// <https://en.wikipedia.org/wiki/F-number>
    pub aperture_f_stops: f32,
    /// <https://en.wikipedia.org/wiki/Shutter_speed>
    pub shutter_speed_s: f32,
    /// <https://en.wikipedia.org/wiki/Film_speed>
    pub sensitivity_iso: f32,
    /// The height of the [image sensor format] in meters.
    ///
    /// Focal length is derived from the FOV and this value. The default is
    /// 18.66mm, matching the [Super 35] format, which is popular in cinema.
    ///
    /// [image sensor format]: https://en.wikipedia.org/wiki/Image_sensor_format
    ///
    /// [Super 35]: https://en.wikipedia.org/wiki/Super_35
    pub sensor_height: f32,
}

impl PhysicalCameraParameters {
    /// Calculate the [EV100](https://en.wikipedia.org/wiki/Exposure_value).
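    ///
    /// Computed as `log2(N^2 * 100 / (t * S))`, where `N` is `aperture_f_stops`,
    /// `t` is `shutter_speed_s`, and `S` is `sensitivity_iso`.
    ///
    /// A quick check against the default parameters (f/1.0, 1/125 s, ISO 100):
    /// ```
    /// use bevy_render::camera::PhysicalCameraParameters;
    ///
    /// // log2(1 * 100 / ((1/125) * 100)) = log2(125) ≈ 6.97
    /// let ev100 = PhysicalCameraParameters::default().ev100();
    /// assert!((ev100 - 125.0_f32.log2()).abs() < 1e-4);
    /// ```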
    pub fn ev100(&self) -> f32 {
        ops::log2(
            self.aperture_f_stops * self.aperture_f_stops * 100.0
                / (self.shutter_speed_s * self.sensitivity_iso),
        )
    }
}

impl Default for PhysicalCameraParameters {
    fn default() -> Self {
        Self {
            aperture_f_stops: 1.0,
            shutter_speed_s: 1.0 / 125.0,
            sensitivity_iso: 100.0,
            sensor_height: 0.01866,
        }
    }
}

/// Error returned when a conversion between world-space and viewport-space coordinates fails.
///
/// See [`world_to_viewport`][Camera::world_to_viewport] and [`viewport_to_world`][Camera::viewport_to_world].
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
pub enum ViewportConversionError {
    /// The pre-computed size of the viewport was not available.
    ///
    /// This may be because the `Camera` was just created and [`camera_system`] has not been executed
    /// yet, or because the [`RenderTarget`] is misconfigured in one of the following ways:
    ///   - it references the [`PrimaryWindow`](RenderTarget::Window) when there is none,
    ///   - it references a [`Window`](RenderTarget::Window) entity that doesn't exist or doesn't actually have a `Window` component,
    ///   - it references an [`Image`](RenderTarget::Image) that doesn't exist (invalid handle),
    ///   - it references a [`TextureView`](RenderTarget::TextureView) that doesn't exist (invalid handle).
    NoViewportSize,
    /// The computed coordinate was beyond the `Camera`'s near plane.
    ///
    /// Only applicable when converting from world-space to viewport-space.
    PastNearPlane,
    /// The computed coordinate was beyond the `Camera`'s far plane.
    ///
    /// Only applicable when converting from world-space to viewport-space.
    PastFarPlane,
    /// The Normalized Device Coordinates could not be computed because the `camera_transform`, the
    /// `world_position`, or the projection matrix defined by [`CameraProjection`] contained `NAN`
    /// (see [`world_to_ndc`][Camera::world_to_ndc] and [`ndc_to_world`][Camera::ndc_to_world]).
    InvalidData,
}

/// The defining [`Component`] for camera entities,
/// storing information about how and what to render through this camera.
///
/// The [`Camera`] component is added to an entity to define the properties of the viewpoint from
/// which rendering occurs. It defines the position of the view to render, the projection method
/// to transform the 3D objects into a 2D image, as well as the render target into which that image
/// is produced.
///
/// Note that a [`Camera`] needs a [`CameraRenderGraph`] to render anything.
/// This is typically provided by adding a [`Camera2d`] or [`Camera3d`] component,
/// but custom render graphs can also be defined. Inserting a [`Camera`] with no render
/// graph will emit an error at runtime.
///
/// [`Camera2d`]: https://docs.rs/crate/bevy_core_pipeline/latest/core_2d/struct.Camera2d.html
/// [`Camera3d`]: https://docs.rs/crate/bevy_core_pipeline/latest/core_3d/struct.Camera3d.html
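///
/// A sketch of a second, overlay-style camera (the `order` and `clear_color`
/// values are illustrative):
/// ```
/// use bevy_render::camera::{Camera, ClearColorConfig};
///
/// // Renders after (on top of) the default camera at order 0, without
/// // clearing what that camera already drew.
/// let overlay_camera = Camera {
///     order: 1,
///     clear_color: ClearColorConfig::None,
///     ..Default::default()
/// };
/// ```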
#[derive(Component, Debug, Reflect, Clone)]
#[reflect(Component, Default, Debug)]
#[component(on_add = warn_on_no_render_graph)]
#[require(
    Frustum,
    CameraMainTextureUsages,
    VisibleEntities,
    Transform,
    Visibility,
    Msaa,
    SyncToRenderWorld
)]
pub struct Camera {
    /// If set, this camera will render to the given [`Viewport`] rectangle within the configured [`RenderTarget`].
    pub viewport: Option<Viewport>,
    /// Cameras with a higher order are rendered later, and thus on top of lower order cameras.
    pub order: isize,
    /// If this is set to `true`, this camera will be rendered to its specified [`RenderTarget`]. If `false`, this
    /// camera will not be rendered.
    pub is_active: bool,
    /// Computed values for this camera, such as the projection matrix and the render target size.
    #[reflect(ignore)]
    pub computed: ComputedCameraValues,
    /// The "target" that this camera will render to.
    pub target: RenderTarget,
    /// If this is set to `true`, the camera will use an intermediate "high dynamic range" render texture.
    /// This allows rendering with a wider range of lighting values.
    pub hdr: bool,
    // todo: reflect this when #6042 lands
    /// The [`CameraOutputMode`] for this camera.
    #[reflect(ignore)]
    pub output_mode: CameraOutputMode,
    /// If this is enabled, a previous camera shares this camera's render target, and this camera has MSAA enabled,
    /// then the previous camera's outputs will be written to this camera's intermediate multi-sampled render target
    /// textures. This enables cameras with MSAA enabled to "write their results on top" of previous camera results,
    /// and include them as a part of their render results. This is enabled by default so that cameras with MSAA
    /// enabled layer their results in the same way as cameras without MSAA enabled.
    pub msaa_writeback: bool,
    /// The clear color operation to perform on the render target.
    pub clear_color: ClearColorConfig,
    /// If set, this camera will be a sub camera of a large view, defined by a [`SubCameraView`].
    pub sub_camera_view: Option<SubCameraView>,
}

fn warn_on_no_render_graph(world: DeferredWorld, entity: Entity, _: ComponentId) {
    if !world.entity(entity).contains::<CameraRenderGraph>() {
        warn!("Entity {entity} has a `Camera` component, but it doesn't have a render graph configured. Consider adding a `Camera2d` or `Camera3d` component, or manually adding a `CameraRenderGraph` component if you need a custom render graph.");
    }
}

impl Default for Camera {
    fn default() -> Self {
        Self {
            is_active: true,
            order: 0,
            viewport: None,
            computed: Default::default(),
            target: Default::default(),
            output_mode: Default::default(),
            hdr: false,
            msaa_writeback: true,
            clear_color: Default::default(),
            sub_camera_view: None,
        }
    }
}

impl Camera {
    /// Converts a physical size in this `Camera` to a logical size.
    #[inline]
    pub fn to_logical(&self, physical_size: UVec2) -> Option<Vec2> {
        let scale = self.computed.target_info.as_ref()?.scale_factor;
        Some(physical_size.as_vec2() / scale)
    }

    /// The rendered physical bounds [`URect`] of the camera. If the `viewport` field is
    /// set to [`Some`], this will be the rect of that custom viewport. Otherwise it will default to
    /// the full physical rect of the current [`RenderTarget`].
    #[inline]
    pub fn physical_viewport_rect(&self) -> Option<URect> {
        let min = self
            .viewport
            .as_ref()
            .map(|v| v.physical_position)
            .unwrap_or(UVec2::ZERO);
        let max = min + self.physical_viewport_size()?;
        Some(URect { min, max })
    }

    /// The rendered logical bounds [`Rect`] of the camera. If the `viewport` field is set to
    /// [`Some`], this will be the rect of that custom viewport. Otherwise it will default to the
    /// full logical rect of the current [`RenderTarget`].
    #[inline]
    pub fn logical_viewport_rect(&self) -> Option<Rect> {
        let URect { min, max } = self.physical_viewport_rect()?;
        Some(Rect {
            min: self.to_logical(min)?,
            max: self.to_logical(max)?,
        })
    }

    /// The logical size of this camera's viewport. If the `viewport` field is set to [`Some`], this
    /// will be the size of that custom viewport. Otherwise it will default to the full logical size
    /// of the current [`RenderTarget`].
    /// For logic that requires the full logical size of the [`RenderTarget`],
    /// prefer [`Camera::logical_target_size`].
    ///
    /// Returns `None` if either:
    /// - the function is called just after the `Camera` is created, before `camera_system` is executed,
    /// - the [`RenderTarget`] isn't correctly set:
    ///   - it references the [`PrimaryWindow`](RenderTarget::Window) when there is none,
    ///   - it references a [`Window`](RenderTarget::Window) entity that doesn't exist or doesn't actually have a `Window` component,
    ///   - it references an [`Image`](RenderTarget::Image) that doesn't exist (invalid handle),
    ///   - it references a [`TextureView`](RenderTarget::TextureView) that doesn't exist (invalid handle).
    #[inline]
    pub fn logical_viewport_size(&self) -> Option<Vec2> {
        self.viewport
            .as_ref()
            .and_then(|v| self.to_logical(v.physical_size))
            .or_else(|| self.logical_target_size())
    }

    /// The physical size of this camera's viewport (in physical pixels).
    /// If the `viewport` field is set to [`Some`], this
    /// will be the size of that custom viewport. Otherwise it will default to the full physical size of
    /// the current [`RenderTarget`].
    /// For logic that requires the full physical size of the [`RenderTarget`], prefer [`Camera::physical_target_size`].
    #[inline]
    pub fn physical_viewport_size(&self) -> Option<UVec2> {
        self.viewport
            .as_ref()
            .map(|v| v.physical_size)
            .or_else(|| self.physical_target_size())
    }

    /// The full logical size of this camera's [`RenderTarget`], ignoring custom `viewport` configuration.
    /// Note that if the `viewport` field is [`Some`], this will not represent the size of the rendered area.
    /// For logic that requires the size of the actually rendered area, prefer [`Camera::logical_viewport_size`].
    #[inline]
    pub fn logical_target_size(&self) -> Option<Vec2> {
        self.computed
            .target_info
            .as_ref()
            .and_then(|t| self.to_logical(t.physical_size))
    }

    /// The full physical size of this camera's [`RenderTarget`] (in physical pixels),
    /// ignoring custom `viewport` configuration.
    /// Note that if the `viewport` field is [`Some`], this will not represent the size of the rendered area.
    /// For logic that requires the size of the actually rendered area, prefer [`Camera::physical_viewport_size`].
    #[inline]
    pub fn physical_target_size(&self) -> Option<UVec2> {
        self.computed.target_info.as_ref().map(|t| t.physical_size)
    }

    /// The scale factor of this camera's [`RenderTarget`], if it has been computed.
    #[inline]
    pub fn target_scaling_factor(&self) -> Option<f32> {
        self.computed
            .target_info
            .as_ref()
            .map(|t: &RenderTargetInfo| t.scale_factor)
    }

    /// The projection matrix computed using this camera's [`CameraProjection`].
    #[inline]
    pub fn clip_from_view(&self) -> Mat4 {
        self.computed.clip_from_view
    }

    /// Given a position in world space, use the camera to compute the viewport-space coordinates.
    ///
    /// To get the coordinates in Normalized Device Coordinates, you should use
    /// [`world_to_ndc`](Self::world_to_ndc).
    ///
    /// # Panics
    ///
    /// Will panic if `glam_assert` is enabled and the `camera_transform` contains `NAN`
    /// (see [`world_to_ndc`][Self::world_to_ndc]).
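    ///
    /// A sketch of the error handling involved (a freshly constructed `Camera`
    /// has no computed target info yet, so the conversion fails):
    /// ```
    /// use bevy_math::Vec3;
    /// use bevy_render::camera::{Camera, ViewportConversionError};
    /// use bevy_transform::components::GlobalTransform;
    ///
    /// let camera = Camera::default();
    /// let result = camera.world_to_viewport(&GlobalTransform::default(), Vec3::ZERO);
    /// assert_eq!(result, Err(ViewportConversionError::NoViewportSize));
    /// ```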
    #[doc(alias = "world_to_screen")]
    pub fn world_to_viewport(
        &self,
        camera_transform: &GlobalTransform,
        world_position: Vec3,
    ) -> Result<Vec2, ViewportConversionError> {
        let target_size = self
            .logical_viewport_size()
            .ok_or(ViewportConversionError::NoViewportSize)?;
        let ndc_space_coords = self
            .world_to_ndc(camera_transform, world_position)
            .ok_or(ViewportConversionError::InvalidData)?;
        // NDC z-values outside of 0 < z < 1 are outside the (implicit) camera frustum and are thus not in viewport-space
        if ndc_space_coords.z < 0.0 {
            return Err(ViewportConversionError::PastNearPlane);
        }
        if ndc_space_coords.z > 1.0 {
            return Err(ViewportConversionError::PastFarPlane);
        }

        // Once in NDC space, we can discard the z element and rescale x/y to fit the screen
        let mut viewport_position = (ndc_space_coords.truncate() + Vec2::ONE) / 2.0 * target_size;
        // Flip the Y co-ordinate origin from the bottom to the top.
        viewport_position.y = target_size.y - viewport_position.y;
        Ok(viewport_position)
    }

    /// Given a position in world space, use the camera to compute the viewport-space coordinates and depth.
    ///
    /// To get the coordinates in Normalized Device Coordinates, you should use
    /// [`world_to_ndc`](Self::world_to_ndc).
    ///
    /// # Panics
    ///
    /// Will panic if `glam_assert` is enabled and the `camera_transform` contains `NAN`
    /// (see [`world_to_ndc`][Self::world_to_ndc]).
    #[doc(alias = "world_to_screen_with_depth")]
    pub fn world_to_viewport_with_depth(
        &self,
        camera_transform: &GlobalTransform,
        world_position: Vec3,
    ) -> Result<Vec3, ViewportConversionError> {
        let target_size = self
            .logical_viewport_size()
            .ok_or(ViewportConversionError::NoViewportSize)?;
        let ndc_space_coords = self
            .world_to_ndc(camera_transform, world_position)
            .ok_or(ViewportConversionError::InvalidData)?;
        // NDC z-values outside of 0 < z < 1 are outside the (implicit) camera frustum and are thus not in viewport-space
        if ndc_space_coords.z < 0.0 {
            return Err(ViewportConversionError::PastNearPlane);
        }
        if ndc_space_coords.z > 1.0 {
            return Err(ViewportConversionError::PastFarPlane);
        }

        // Convert the NDC depth to linear view-space depth via the near plane, then negate
        // the result so that the returned depth is positive.
        let depth = -self.depth_ndc_to_view_z(ndc_space_coords.z);

        // Once in NDC space, we can discard the z element and rescale x/y to fit the screen
        let mut viewport_position = (ndc_space_coords.truncate() + Vec2::ONE) / 2.0 * target_size;
        // Flip the Y co-ordinate origin from the bottom to the top.
        viewport_position.y = target_size.y - viewport_position.y;
        Ok(viewport_position.extend(depth))
    }

    /// Returns a ray originating from the camera that passes through everything beyond `viewport_position`.
    ///
    /// The resulting ray starts on the near plane of the camera.
    ///
    /// If the camera's projection is orthographic, the direction of the ray is always equal to `camera_transform.forward()`.
    ///
    /// To get the world space coordinates with Normalized Device Coordinates, you should use
    /// [`ndc_to_world`](Self::ndc_to_world).
    ///
    /// # Panics
    ///
    /// Will panic if the camera's projection matrix is invalid (has a determinant of 0) and
    /// `glam_assert` is enabled (see [`ndc_to_world`](Self::ndc_to_world)).
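    ///
    /// A minimal cursor-picking sketch (illustrative: assumes exactly one primary
    /// window and one camera, and that this function runs as a system):
    /// ```
    /// use bevy_ecs::prelude::*;
    /// use bevy_render::camera::Camera;
    /// use bevy_transform::components::GlobalTransform;
    /// use bevy_window::{PrimaryWindow, Window};
    ///
    /// fn cast_cursor_ray(
    ///     windows: Query<&Window, With<PrimaryWindow>>,
    ///     cameras: Query<(&Camera, &GlobalTransform)>,
    /// ) {
    ///     let window = windows.single();
    ///     let (camera, camera_transform) = cameras.single();
    ///     if let Some(cursor_position) = window.cursor_position() {
    ///         if let Ok(ray) = camera.viewport_to_world(camera_transform, cursor_position) {
    ///             // `ray.origin` and `ray.direction` can now feed a raycast.
    ///         }
    ///     }
    /// }
    /// ```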
    pub fn viewport_to_world(
        &self,
        camera_transform: &GlobalTransform,
        mut viewport_position: Vec2,
    ) -> Result<Ray3d, ViewportConversionError> {
        let target_size = self
            .logical_viewport_size()
            .ok_or(ViewportConversionError::NoViewportSize)?;
        // Flip the Y co-ordinate origin from the top to the bottom.
        viewport_position.y = target_size.y - viewport_position.y;
        let ndc = viewport_position * 2. / target_size - Vec2::ONE;

        let ndc_to_world =
            camera_transform.compute_matrix() * self.computed.clip_from_view.inverse();
        let world_near_plane = ndc_to_world.project_point3(ndc.extend(1.));
        // Using EPSILON because an ndc with Z = 0 returns NaNs.
        let world_far_plane = ndc_to_world.project_point3(ndc.extend(f32::EPSILON));

        // The fallible direction constructor ensures that world_near_plane and world_far_plane aren't NaN.
        Dir3::new(world_far_plane - world_near_plane)
            .map_err(|_| ViewportConversionError::InvalidData)
            .map(|direction| Ray3d {
                origin: world_near_plane,
                direction,
            })
    }

    /// Returns a 2D world position computed from a position on this [`Camera`]'s viewport.
    ///
    /// Useful for 2D cameras and other cameras with an orthographic projection pointing along the Z axis.
    ///
    /// To get the world space coordinates with Normalized Device Coordinates, you should use
    /// [`ndc_to_world`](Self::ndc_to_world).
    ///
    /// # Panics
    ///
    /// Will panic if the camera's projection matrix is invalid (has a determinant of 0) and
    /// `glam_assert` is enabled (see [`ndc_to_world`](Self::ndc_to_world)).
    pub fn viewport_to_world_2d(
        &self,
        camera_transform: &GlobalTransform,
        mut viewport_position: Vec2,
    ) -> Result<Vec2, ViewportConversionError> {
        let target_size = self
            .logical_viewport_size()
            .ok_or(ViewportConversionError::NoViewportSize)?;
        // Flip the Y co-ordinate origin from the top to the bottom.
        viewport_position.y = target_size.y - viewport_position.y;
        let ndc = viewport_position * 2. / target_size - Vec2::ONE;

        let world_near_plane = self
            .ndc_to_world(camera_transform, ndc.extend(1.))
            .ok_or(ViewportConversionError::InvalidData)?;

        Ok(world_near_plane.truncate())
    }

    /// Given a position in world space, use the camera's viewport to compute the Normalized Device Coordinates.
    ///
    /// When the position is within the viewport the values returned will be between -1.0 and 1.0 on the X and Y axes,
    /// and between 0.0 and 1.0 on the Z axis.
    /// To get the coordinates in the render target's viewport dimensions, you should use
    /// [`world_to_viewport`](Self::world_to_viewport).
    ///
    /// Returns `None` if the `camera_transform`, the `world_position`, or the projection matrix defined by [`CameraProjection`] contain `NAN`.
    ///
    /// # Panics
    ///
    /// Will panic if the `camera_transform` contains `NAN` and the `glam_assert` feature is enabled.
    pub fn world_to_ndc(
        &self,
        camera_transform: &GlobalTransform,
        world_position: Vec3,
    ) -> Option<Vec3> {
        // Build a transformation matrix to convert from world space to NDC using camera data
        let clip_from_world: Mat4 =
            self.computed.clip_from_view * camera_transform.compute_matrix().inverse();
        let ndc_space_coords: Vec3 = clip_from_world.project_point3(world_position);

        (!ndc_space_coords.is_nan()).then_some(ndc_space_coords)
    }

    /// Given a position in Normalized Device Coordinates,
    /// use the camera's viewport to compute the world space position.
    ///
    /// When the position is within the viewport, the `ndc` coordinates will be between -1.0 and 1.0 on the X and Y axes,
    /// and between 0.0 and 1.0 on the Z axis.
    /// To compute a world space position from a viewport position, you should use
    /// [`viewport_to_world`](Self::viewport_to_world).
    ///
    /// Returns `None` if the `camera_transform`, the `ndc` position, or the projection matrix defined by [`CameraProjection`] contain `NAN`.
    ///
    /// # Panics
    ///
    /// Will panic if the projection matrix is invalid (has a determinant of 0) and `glam_assert` is enabled.
    pub fn ndc_to_world(&self, camera_transform: &GlobalTransform, ndc: Vec3) -> Option<Vec3> {
        // Build a transformation matrix to convert from NDC to world space using camera data
        let ndc_to_world =
            camera_transform.compute_matrix() * self.computed.clip_from_view.inverse();

        let world_space_coords = ndc_to_world.project_point3(ndc);

        (!world_space_coords.is_nan()).then_some(world_space_coords)
    }

    /// Converts the depth in Normalized Device Coordinates
    /// to linear view z for perspective projections.
    ///
    /// Note: Depth values in front of the camera will be negative as -z is forward
    pub fn depth_ndc_to_view_z(&self, ndc_depth: f32) -> f32 {
        let near = self.clip_from_view().w_axis.z; // [3][2]
        -near / ndc_depth
    }

    /// Converts the depth in Normalized Device Coordinates
    /// to linear view z for orthographic projections.
    ///
    /// Note: Depth values in front of the camera will be negative as -z is forward
    pub fn depth_ndc_to_view_z_2d(&self, ndc_depth: f32) -> f32 {
        -(self.clip_from_view().w_axis.z - ndc_depth) / self.clip_from_view().z_axis.z
        //                       [3][2]                                         [2][2]
    }
}

/// Control how this camera outputs once rendering is completed.
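///
/// A sketch of a non-default output mode (values are illustrative): alpha-blend
/// this camera's output over the target instead of replacing it, without clearing:
/// ```
/// use bevy_render::camera::{CameraOutputMode, ClearColorConfig};
/// use bevy_render::render_resource::BlendState;
///
/// let output_mode = CameraOutputMode::Write {
///     blend_state: Some(BlendState::ALPHA_BLENDING),
///     clear_color: ClearColorConfig::None,
/// };
/// ```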
#[derive(Debug, Clone, Copy)]
pub enum CameraOutputMode {
    /// Writes the camera output to the configured render target.
    Write {
        /// The blend state that will be used by the pipeline that writes the intermediate render textures to the final render target texture.
        blend_state: Option<BlendState>,
        /// The clear color operation to perform on the final render target texture.
        clear_color: ClearColorConfig,
    },
    /// Skips writing the camera output to the configured render target. The output will remain in the
    /// render target's "intermediate" textures, which a camera with a higher order should write to the render target
    /// using [`CameraOutputMode::Write`]. The "skip" mode can easily prevent render results from being displayed, or cause
    /// them to be lost, so only use it if you know what you are doing!
    /// In camera setups with multiple active cameras rendering to the same [`RenderTarget`], the Skip mode can be used to avoid
    /// unnecessary / redundant writes to the final output texture, removing unnecessary render passes.
    Skip,
}

impl Default for CameraOutputMode {
    fn default() -> Self {
        CameraOutputMode::Write {
            blend_state: None,
            clear_color: ClearColorConfig::Default,
        }
    }
}

/// Configures the [`RenderGraph`](crate::render_graph::RenderGraph) name assigned to be run for a given [`Camera`] entity.
#[derive(Component, Debug, Deref, DerefMut, Reflect, Clone)]
#[reflect(opaque)]
#[reflect(Component, Debug)]
pub struct CameraRenderGraph(InternedRenderSubGraph);

impl CameraRenderGraph {
    /// Creates a new [`CameraRenderGraph`] from any type that implements [`RenderSubGraph`].
    #[inline]
    pub fn new<T: RenderSubGraph>(name: T) -> Self {
        Self(name.intern())
    }

    /// Sets the graph name.
    #[inline]
    pub fn set<T: RenderSubGraph>(&mut self, name: T) {
        self.0 = name.intern();
    }
}

/// The "target" that a [`Camera`] will render to. For example, this could be a [`Window`]
/// swapchain or an [`Image`].
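///
/// A sketch of rendering to an image instead of a window (the handle below is a
/// placeholder; a real setup would point it at a render-target-capable image asset):
/// ```
/// use bevy_asset::Handle;
/// use bevy_image::Image;
/// use bevy_render::camera::RenderTarget;
///
/// let image_handle: Handle<Image> = Handle::default();
/// let target = RenderTarget::Image(image_handle);
/// ```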
#[derive(Debug, Clone, Reflect, From)]
pub enum RenderTarget {
    /// Window to which the camera's view is rendered.
    Window(WindowRef),
    /// Image to which the camera's view is rendered.
    Image(Handle<Image>),
    /// Texture View to which the camera's view is rendered.
    /// Useful when the texture view needs to be created outside of Bevy, for example OpenXR.
    TextureView(ManualTextureViewHandle),
}

impl Default for RenderTarget {
    fn default() -> Self {
        Self::Window(Default::default())
    }
}

/// Normalized version of the render target.
///
/// Once we have this we shouldn't need to resolve it down anymore.
#[derive(Debug, Clone, Reflect, PartialEq, Eq, Hash, PartialOrd, Ord, From)]
pub enum NormalizedRenderTarget {
    /// Window to which the camera's view is rendered.
    Window(NormalizedWindowRef),
    /// Image to which the camera's view is rendered.
    Image(Handle<Image>),
    /// Texture View to which the camera's view is rendered.
    /// Useful when the texture view needs to be created outside of Bevy, for example OpenXR.
    TextureView(ManualTextureViewHandle),
}

impl RenderTarget {
    /// Normalize the render target down to a more concrete value, mostly used for equality comparisons.
    pub fn normalize(&self, primary_window: Option<Entity>) -> Option<NormalizedRenderTarget> {
        match self {
            RenderTarget::Window(window_ref) => window_ref
                .normalize(primary_window)
                .map(NormalizedRenderTarget::Window),
            RenderTarget::Image(handle) => Some(NormalizedRenderTarget::Image(handle.clone())),
            RenderTarget::TextureView(id) => Some(NormalizedRenderTarget::TextureView(*id)),
        }
    }

    /// Get a handle to the render target's image,
    /// or `None` if the render target is another variant.
    pub fn as_image(&self) -> Option<&Handle<Image>> {
        if let Self::Image(handle) = self {
            Some(handle)
        } else {
            None
        }
    }
}

impl NormalizedRenderTarget {
    /// Retrieves the [`TextureView`] of this render target, if it exists.
    pub fn get_texture_view<'a>(
        &self,
        windows: &'a ExtractedWindows,
        images: &'a RenderAssets<GpuImage>,
        manual_texture_views: &'a ManualTextureViews,
    ) -> Option<&'a TextureView> {
        match self {
            NormalizedRenderTarget::Window(window_ref) => windows
                .get(&window_ref.entity())
                .and_then(|window| window.swap_chain_texture_view.as_ref()),
            NormalizedRenderTarget::Image(image_handle) => {
                images.get(image_handle).map(|image| &image.texture_view)
            }
            NormalizedRenderTarget::TextureView(id) => {
                manual_texture_views.get(id).map(|tex| &tex.texture_view)
            }
        }
    }

    /// Retrieves the [`TextureFormat`] of this render target, if it exists.
    pub fn get_texture_format<'a>(
        &self,
        windows: &'a ExtractedWindows,
        images: &'a RenderAssets<GpuImage>,
        manual_texture_views: &'a ManualTextureViews,
    ) -> Option<TextureFormat> {
        match self {
            NormalizedRenderTarget::Window(window_ref) => windows
                .get(&window_ref.entity())
                .and_then(|window| window.swap_chain_texture_format),
            NormalizedRenderTarget::Image(image_handle) => {
                images.get(image_handle).map(|image| image.texture_format)
            }
            NormalizedRenderTarget::TextureView(id) => {
                manual_texture_views.get(id).map(|tex| tex.format)
            }
        }
    }

    /// Retrieves the [`RenderTargetInfo`] (physical size and scale factor) of this render target, if it exists.
    pub fn get_render_target_info<'a>(
        &self,
        resolutions: impl IntoIterator<Item = (Entity, &'a Window)>,
        images: &Assets<Image>,
        manual_texture_views: &ManualTextureViews,
    ) -> Option<RenderTargetInfo> {
        match self {
            NormalizedRenderTarget::Window(window_ref) => resolutions
                .into_iter()
                .find(|(entity, _)| *entity == window_ref.entity())
                .map(|(_, window)| RenderTargetInfo {
                    physical_size: window.physical_size(),
                    scale_factor: window.resolution.scale_factor(),
                }),
            NormalizedRenderTarget::Image(image_handle) => {
                let image = images.get(image_handle)?;
                Some(RenderTargetInfo {
                    physical_size: image.size(),
                    scale_factor: 1.0,
                })
            }
            NormalizedRenderTarget::TextureView(id) => {
                manual_texture_views.get(id).map(|tex| RenderTargetInfo {
                    physical_size: tex.size,
                    scale_factor: 1.0,
                })
            }
        }
    }

    /// Check if this render target is contained in the given changed windows or images.
    fn is_changed(
        &self,
        changed_window_ids: &HashSet<Entity>,
        changed_image_handles: &HashSet<&AssetId<Image>>,
    ) -> bool {
        match self {
            NormalizedRenderTarget::Window(window_ref) => {
                changed_window_ids.contains(&window_ref.entity())
            }
            NormalizedRenderTarget::Image(image_handle) => {
                changed_image_handles.contains(&image_handle.id())
            }
            NormalizedRenderTarget::TextureView(_) => true,
        }
    }
}

/// System in charge of updating a [`Camera`] when its window or projection changes.
///
/// The system detects window creation, resize, and scale factor change events to update the camera
/// projection if needed. It also queries any [`CameraProjection`] component associated with the same
/// entity as the [`Camera`], to automatically update the camera projection matrix.
///
/// The system function is generic over the camera projection type, and only instances of
/// [`OrthographicProjection`] and [`PerspectiveProjection`] are automatically added to
/// the app, as well as the runtime-selected [`Projection`].
/// The system runs during [`PostUpdate`](bevy_app::PostUpdate).
///
/// ## World Resources
///
/// [`Res<Assets<Image>>`](Assets<Image>) -- For cameras that render to an image, this resource is used to
/// inspect information about the render target. This system will not access any other image assets.
///
/// [`OrthographicProjection`]: crate::camera::OrthographicProjection
/// [`PerspectiveProjection`]: crate::camera::PerspectiveProjection
#[allow(clippy::too_many_arguments)]
pub fn camera_system<T: CameraProjection + Component>(
    mut window_resized_events: EventReader<WindowResized>,
    mut window_created_events: EventReader<WindowCreated>,
    mut window_scale_factor_changed_events: EventReader<WindowScaleFactorChanged>,
    mut image_asset_events: EventReader<AssetEvent<Image>>,
    primary_window: Query<Entity, With<PrimaryWindow>>,
    windows: Query<(Entity, &Window)>,
    images: Res<Assets<Image>>,
    manual_texture_views: Res<ManualTextureViews>,
    mut cameras: Query<(&mut Camera, &mut T)>,
) {
    let primary_window = primary_window.iter().next();

    let mut changed_window_ids = HashSet::new();
    changed_window_ids.extend(window_created_events.read().map(|event| event.window));
    changed_window_ids.extend(window_resized_events.read().map(|event| event.window));
    let scale_factor_changed_window_ids: HashSet<_> = window_scale_factor_changed_events
        .read()
        .map(|event| event.window)
        .collect();
    changed_window_ids.extend(scale_factor_changed_window_ids.clone());

    let changed_image_handles: HashSet<&AssetId<Image>> = image_asset_events
        .read()
        .filter_map(|event| match event {
            AssetEvent::Modified { id } | AssetEvent::Added { id } => Some(id),
            _ => None,
        })
        .collect();

    for (mut camera, mut camera_projection) in &mut cameras {
        let mut viewport_size = camera
            .viewport
            .as_ref()
            .map(|viewport| viewport.physical_size);

        if let Some(normalized_target) = camera.target.normalize(primary_window) {
            if normalized_target.is_changed(&changed_window_ids, &changed_image_handles)
                || camera.is_added()
                || camera_projection.is_changed()
                || camera.computed.old_viewport_size != viewport_size
                || camera.computed.old_sub_camera_view != camera.sub_camera_view
            {
                let new_computed_target_info = normalized_target.get_render_target_info(
                    &windows,
                    &images,
                    &manual_texture_views,
                );
                // Check for the scale factor changing, and resize the viewport if needed.
                // This can happen when the window is moved between monitors with different DPIs.
                // Without this, the viewport would take up a smaller portion of the window when
                // moved to a higher-DPI monitor.
                if normalized_target.is_changed(&scale_factor_changed_window_ids, &HashSet::new()) {
                    if let (Some(new_scale_factor), Some(old_scale_factor)) = (
                        new_computed_target_info
                            .as_ref()
                            .map(|info| info.scale_factor),
                        camera
                            .computed
                            .target_info
                            .as_ref()
                            .map(|info| info.scale_factor),
                    ) {
                        let resize_factor = new_scale_factor / old_scale_factor;
                        if let Some(ref mut viewport) = camera.viewport {
                            let resize = |vec: UVec2| (vec.as_vec2() * resize_factor).as_uvec2();
                            viewport.physical_position = resize(viewport.physical_position);
                            viewport.physical_size = resize(viewport.physical_size);
                            viewport_size = Some(viewport.physical_size);
                        }
                    }
                }
                // This check is needed because switching the WindowMode to SizedFullscreen can suddenly
                // shrink the window, leaving the viewport with dimensions larger than the new window size.
                // If the window is smaller, the viewport is clamped to the window size.
                if let Some(viewport) = &mut camera.viewport {
                    let target_info = &new_computed_target_info;
                    if let Some(target) = target_info {
                        if viewport.physical_size.x > target.physical_size.x {
                            viewport.physical_size.x = target.physical_size.x;
                        }
                        if viewport.physical_size.y > target.physical_size.y {
                            viewport.physical_size.y = target.physical_size.y;
                        }
                    }
                }
                camera.computed.target_info = new_computed_target_info;
                if let Some(size) = camera.logical_viewport_size() {
                    if size.x != 0.0 && size.y != 0.0 {
                        camera_projection.update(size.x, size.y);
                        camera.computed.clip_from_view = match &camera.sub_camera_view {
                            Some(sub_view) => {
                                camera_projection.get_clip_from_view_for_sub(sub_view)
                            }
                            None => camera_projection.get_clip_from_view(),
                        }
                    }
                }
            }
        }

        if camera.computed.old_viewport_size != viewport_size {
            camera.computed.old_viewport_size = viewport_size;
        }

        if camera.computed.old_sub_camera_view != camera.sub_camera_view {
            camera.computed.old_sub_camera_view = camera.sub_camera_view;
        }
    }
}

/// This component lets you control the [`TextureUsages`] field of the main texture generated for the camera.
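///
/// A sketch of opting into an extra usage on top of the defaults (`COPY_DST`
/// here is illustrative, e.g. for writing into the main texture from elsewhere):
/// ```
/// use bevy_render::camera::CameraMainTextureUsages;
/// use bevy_render::render_resource::TextureUsages;
///
/// let usages = CameraMainTextureUsages(
///     CameraMainTextureUsages::default().0 | TextureUsages::COPY_DST,
/// );
/// ```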
#[derive(Component, ExtractComponent, Clone, Copy, Reflect)]
#[reflect(opaque)]
#[reflect(Component, Default)]
pub struct CameraMainTextureUsages(pub TextureUsages);
impl Default for CameraMainTextureUsages {
    fn default() -> Self {
        Self(
            TextureUsages::RENDER_ATTACHMENT
                | TextureUsages::TEXTURE_BINDING
                | TextureUsages::COPY_SRC,
        )
    }
}

/// The render-world representation of a [`Camera`], extracted once per frame by [`extract_cameras`].
#[derive(Component, Debug)]
pub struct ExtractedCamera {
    pub target: Option<NormalizedRenderTarget>,
    pub physical_viewport_size: Option<UVec2>,
    pub physical_target_size: Option<UVec2>,
    pub viewport: Option<Viewport>,
    pub render_graph: InternedRenderSubGraph,
    pub order: isize,
    pub output_mode: CameraOutputMode,
    pub msaa_writeback: bool,
    pub clear_color: ClearColorConfig,
    pub sorted_camera_index_for_target: usize,
    pub exposure: f32,
    pub hdr: bool,
}

/// Extracts each active [`Camera`] into the render world as an [`ExtractedCamera`] and [`ExtractedView`].
pub fn extract_cameras(
    mut commands: Commands,
    query: Extract<
        Query<(
            RenderEntity,
            &Camera,
            &CameraRenderGraph,
            &GlobalTransform,
            &VisibleEntities,
            &Frustum,
            Option<&ColorGrading>,
            Option<&Exposure>,
            Option<&TemporalJitter>,
            Option<&RenderLayers>,
            Option<&Projection>,
            Has<GpuCulling>,
        )>,
    >,
    primary_window: Extract<Query<Entity, With<PrimaryWindow>>>,
    gpu_preprocessing_support: Res<GpuPreprocessingSupport>,
    mapper: Extract<Query<&RenderEntity>>,
) {
    let primary_window = primary_window.iter().next();
    for (
        render_entity,
        camera,
        camera_render_graph,
        transform,
        visible_entities,
        frustum,
        color_grading,
        exposure,
        temporal_jitter,
        render_layers,
        projection,
        gpu_culling,
    ) in query.iter()
    {
        if !camera.is_active {
            commands.entity(render_entity).remove::<(
                ExtractedCamera,
                ExtractedView,
                RenderVisibleEntities,
                TemporalJitter,
                RenderLayers,
                Projection,
                GpuCulling,
                ViewUniformOffset,
            )>();
            continue;
        }

        let color_grading = color_grading.unwrap_or(&ColorGrading::default()).clone();

        if let (
            Some(URect {
                min: viewport_origin,
                ..
            }),
            Some(viewport_size),
            Some(target_size),
        ) = (
            camera.physical_viewport_rect(),
            camera.physical_viewport_size(),
            camera.physical_target_size(),
        ) {
            if target_size.x == 0 || target_size.y == 0 {
                continue;
            }

            let render_visible_entities = RenderVisibleEntities {
                entities: visible_entities
                    .entities
                    .iter()
                    .map(|(type_id, entities)| {
                        let entities = entities
                            .iter()
                            .map(|entity| {
                                let render_entity = mapper
                                    .get(*entity)
                                    .cloned()
                                    .map(|entity| entity.id())
                                    .unwrap_or_else(|_e| {
                                        commands.spawn(TemporaryRenderEntity).id()
                                    });
                                (render_entity, (*entity).into())
                            })
                            .collect();
                        (*type_id, entities)
                    })
                    .collect(),
            };
            let mut commands = commands.entity(render_entity);
            commands.insert((
                ExtractedCamera {
                    target: camera.target.normalize(primary_window),
                    viewport: camera.viewport.clone(),
                    physical_viewport_size: Some(viewport_size),
                    physical_target_size: Some(target_size),
                    render_graph: camera_render_graph.0,
                    order: camera.order,
                    output_mode: camera.output_mode,
                    msaa_writeback: camera.msaa_writeback,
                    clear_color: camera.clear_color,
                    // this will be set in sort_cameras
                    sorted_camera_index_for_target: 0,
                    exposure: exposure
                        .map(Exposure::exposure)
                        .unwrap_or_else(|| Exposure::default().exposure()),
                    hdr: camera.hdr,
                },
                ExtractedView {
                    clip_from_view: camera.clip_from_view(),
                    world_from_view: *transform,
                    clip_from_world: None,
                    hdr: camera.hdr,
                    viewport: UVec4::new(
                        viewport_origin.x,
                        viewport_origin.y,
                        viewport_size.x,
                        viewport_size.y,
                    ),
                    color_grading,
                },
                render_visible_entities,
                *frustum,
            ));

            if let Some(temporal_jitter) = temporal_jitter {
                commands.insert(temporal_jitter.clone());
            }

            if let Some(render_layers) = render_layers {
                commands.insert(render_layers.clone());
            }

            if let Some(projection) = projection {
                commands.insert(projection.clone());
            }
            if gpu_culling {
                if *gpu_preprocessing_support == GpuPreprocessingSupport::Culling {
                    commands.insert(GpuCulling);
                } else {
                    warn_once!(
                        "GPU culling isn't supported on this platform; ignoring `GpuCulling`."
                    );
                }
            }
        }
    }
}

/// Cameras sorted by their order field. This is updated in the [`sort_cameras`] system.
#[derive(Resource, Default)]
pub struct SortedCameras(pub Vec<SortedCamera>);

/// A single camera entry in [`SortedCameras`].
pub struct SortedCamera {
    pub entity: Entity,
    pub order: isize,
    pub target: Option<NormalizedRenderTarget>,
    pub hdr: bool,
}

/// Sorts all extracted cameras by their [`Camera::order`] and warns about order ambiguities.
pub fn sort_cameras(
    mut sorted_cameras: ResMut<SortedCameras>,
    mut cameras: Query<(Entity, &mut ExtractedCamera)>,
) {
    sorted_cameras.0.clear();
    for (entity, camera) in cameras.iter() {
        sorted_cameras.0.push(SortedCamera {
            entity,
            order: camera.order,
            target: camera.target.clone(),
            hdr: camera.hdr,
        });
    }
    // sort by order and ensure within an order, RenderTargets of the same type are packed together
    sorted_cameras
        .0
        .sort_by(|c1, c2| match c1.order.cmp(&c2.order) {
            core::cmp::Ordering::Equal => c1.target.cmp(&c2.target),
            ord => ord,
        });
    let mut previous_order_target = None;
    let mut ambiguities = HashSet::new();
    let mut target_counts = HashMap::new();
    for sorted_camera in &mut sorted_cameras.0 {
        let new_order_target = (sorted_camera.order, sorted_camera.target.clone());
        if let Some(previous_order_target) = previous_order_target {
            if previous_order_target == new_order_target {
                ambiguities.insert(new_order_target.clone());
            }
        }
        if let Some(target) = &sorted_camera.target {
            let count = target_counts
                .entry((target.clone(), sorted_camera.hdr))
                .or_insert(0usize);
            let (_, mut camera) = cameras.get_mut(sorted_camera.entity).unwrap();
            camera.sorted_camera_index_for_target = *count;
            *count += 1;
        }
        previous_order_target = Some(new_order_target);
    }

    if !ambiguities.is_empty() {
        warn!(
            "Camera order ambiguities detected for active cameras with the following priorities: {:?}. \
            To fix this, ensure there is exactly one Camera entity spawned with a given order for a given RenderTarget. \
            Ambiguities should be resolved because either (1) multiple active cameras were spawned accidentally, which will \
            result in rendering multiple instances of the scene or (2) for cases where multiple active cameras is intentional, \
            ambiguities could result in unpredictable render results.",
            ambiguities
        );
    }
}

/// A subpixel offset to jitter a perspective camera's frustum by.
///
/// Useful for temporal rendering techniques.
///
/// Do not use with [`OrthographicProjection`].
///
/// [`OrthographicProjection`]: crate::camera::OrthographicProjection
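///
/// A sketch of a per-frame jitter value (the offset is illustrative; TAA-style
/// techniques typically step through a low-discrepancy sequence such as Halton):
/// ```
/// use bevy_math::vec2;
/// use bevy_render::camera::TemporalJitter;
///
/// let jitter = TemporalJitter {
///     // Quarter-pixel offset for this frame, within the [-0.5, 0.5] range.
///     offset: vec2(0.25, -0.25),
/// };
/// ```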
#[derive(Component, Clone, Default, Reflect)]
#[reflect(Default, Component)]
pub struct TemporalJitter {
    /// Offset is in range [-0.5, 0.5].
    pub offset: Vec2,
}

impl TemporalJitter {
    pub fn jitter_projection(&self, clip_from_view: &mut Mat4, view_size: Vec2) {
        // An orthographic projection matrix has w_axis.w == 1.0 (a perspective one has 0.0).
        if clip_from_view.w_axis.w == 1.0 {
            warn!(
                "TemporalJitter not supported with OrthographicProjection. Use PerspectiveProjection instead."
            );
            return;
        }

        // https://github.com/GPUOpen-LibrariesAndSDKs/FidelityFX-SDK/blob/d7531ae47d8b36a5d4025663e731a47a38be882f/docs/techniques/media/super-resolution-temporal/jitter-space.svg
        let jitter = (self.offset * vec2(2.0, -2.0)) / view_size;

        clip_from_view.z_axis.x += jitter.x;
        clip_from_view.z_axis.y += jitter.y;
    }
}

/// Camera component specifying a mip bias to apply when sampling from material textures.
///
/// Often used in conjunction with antialiasing post-process effects to reduce texture blurriness.
#[derive(Default, Component, Reflect)]
#[reflect(Default, Component)]
pub struct MipBias(pub f32);