// bevy_camera/camera.rs

use crate::primitives::Frustum;

use super::{
    visibility::{Visibility, VisibleEntities},
    ClearColorConfig,
};
use bevy_asset::Handle;
use bevy_derive::Deref;
use bevy_ecs::{component::Component, entity::Entity, reflect::ReflectComponent};
use bevy_image::Image;
use bevy_math::{ops, Dir3, FloatOrd, Mat4, Ray3d, Rect, URect, UVec2, Vec2, Vec3, Vec3A};
use bevy_reflect::prelude::*;
use bevy_transform::components::{GlobalTransform, Transform};
use bevy_window::{NormalizedWindowRef, WindowRef};
use core::ops::Range;
use derive_more::derive::From;
use thiserror::Error;
use wgpu_types::{BlendState, TextureUsages};

/// Render viewport configuration for the [`Camera`] component.
///
/// The viewport defines the area on the render target to which the camera renders its image.
/// You can overlay multiple cameras in a single window using viewports to create effects like
/// split screen, minimaps, and character viewers.
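///
/// For example, a viewport covering the left half of a 1920x1080 render target
/// (the size here is illustrative; use your actual target's physical size):
/// ```
/// # use bevy_camera::Viewport;
/// # use bevy_math::UVec2;
/// let left_half = Viewport {
///     physical_position: UVec2::new(0, 0),
///     physical_size: UVec2::new(960, 1080),
///     ..Default::default()
/// };
/// ```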
#[derive(Reflect, Debug, Clone)]
#[reflect(Default, Clone)]
pub struct Viewport {
    /// The physical position to render this viewport to within the [`RenderTarget`] of this [`Camera`].
    /// (0,0) corresponds to the top-left corner.
    pub physical_position: UVec2,
    /// The physical size of the viewport rectangle to render to within the [`RenderTarget`] of this [`Camera`].
    /// The origin of the rectangle is in the top-left corner.
    pub physical_size: UVec2,
    /// The minimum and maximum depth to render (on a scale from 0.0 to 1.0).
    pub depth: Range<f32>,
}

impl Default for Viewport {
    fn default() -> Self {
        Self {
            physical_position: Default::default(),
            physical_size: UVec2::new(1, 1),
            depth: 0.0..1.0,
        }
    }
}

impl Viewport {
    /// Cut the viewport rectangle so that it lies inside a rectangle of the
    /// given size.
    ///
    /// If either of the viewport's position coordinates lies outside the given
    /// dimensions, it will be moved just inside first. If either of the given
    /// dimensions is zero, the position and size of the viewport rectangle will
    /// both be set to zero in that dimension.
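    ///
    /// For example (values here are illustrative):
    /// ```
    /// # use bevy_camera::Viewport;
    /// # use bevy_math::UVec2;
    /// let mut viewport = Viewport {
    ///     physical_position: UVec2::new(100, 100),
    ///     physical_size: UVec2::new(1920, 1080),
    ///     ..Default::default()
    /// };
    /// // Clamp to a 1280x720 target: the position still fits, so only the size is cut.
    /// viewport.clamp_to_size(UVec2::new(1280, 720));
    /// assert_eq!(viewport.physical_size, UVec2::new(1180, 620));
    /// ```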
    pub fn clamp_to_size(&mut self, size: UVec2) {
        // If the origin of the viewport rect is outside, then adjust so that
        // it's just barely inside. Then, cut off the part that is outside.
        if self.physical_size.x + self.physical_position.x > size.x {
            if self.physical_position.x < size.x {
                self.physical_size.x = size.x - self.physical_position.x;
            } else if size.x > 0 {
                self.physical_position.x = size.x - 1;
                self.physical_size.x = 1;
            } else {
                self.physical_position.x = 0;
                self.physical_size.x = 0;
            }
        }
        if self.physical_size.y + self.physical_position.y > size.y {
            if self.physical_position.y < size.y {
                self.physical_size.y = size.y - self.physical_position.y;
            } else if size.y > 0 {
                self.physical_position.y = size.y - 1;
                self.physical_size.y = 1;
            } else {
                self.physical_position.y = 0;
                self.physical_size.y = 0;
            }
        }
    }

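    /// Returns the camera's [`Viewport`] with the [`MainPassResolutionOverride`] applied, if any.
    ///
    /// If an override is present but no viewport was configured, a default viewport is created to
    /// carry the overridden size.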
    pub fn from_viewport_and_override(
        viewport: Option<&Self>,
        main_pass_resolution_override: Option<&MainPassResolutionOverride>,
    ) -> Option<Self> {
        let mut viewport = viewport.cloned();

        if let Some(override_size) = main_pass_resolution_override {
            viewport.get_or_insert_with(Viewport::default).physical_size = **override_size;
        }

        viewport
    }
}

/// Override the resolution a 3d camera's main pass is rendered at.
///
/// Does not affect post processing.
///
/// ## Usage
///
/// * Insert this component on a 3d camera entity in the render world.
/// * The resolution override must be smaller than the camera's viewport size.
/// * The resolution override is specified in physical pixels.
/// * In shaders, use `View::main_pass_viewport` instead of `View::viewport`.
#[derive(Component, Reflect, Deref, Debug)]
#[reflect(Component)]
pub struct MainPassResolutionOverride(pub UVec2);

/// Settings to define a camera sub view.
///
/// When [`Camera::sub_camera_view`] is `Some`, only the sub-section of the
/// image defined by `size` and `offset` (relative to the `full_size` of the
/// whole image) is projected to the camera's viewport.
///
/// Take the example of the following multi-monitor setup:
/// ```text
/// ┌───┬───┐
/// │ A │ B │
/// ├───┼───┤
/// │ C │ D │
/// └───┴───┘
/// ```
/// If each monitor is 1920x1080, the whole image will have a resolution of
/// 3840x2160. For each monitor we can use a single camera with a viewport of
/// the same size as the monitor it corresponds to. To ensure that the image is
/// cohesive, we can use a different sub view on each camera:
/// - Camera A: `full_size` = 3840x2160, `size` = 1920x1080, `offset` = 0,0
/// - Camera B: `full_size` = 3840x2160, `size` = 1920x1080, `offset` = 1920,0
/// - Camera C: `full_size` = 3840x2160, `size` = 1920x1080, `offset` = 0,1080
/// - Camera D: `full_size` = 3840x2160, `size` = 1920x1080, `offset` =
///   1920,1080
///
/// However, since only the ratio between the values is important, they could
/// all be divided by 120 and still produce the same image. Camera D would, for
/// example, have the following values:
/// `full_size` = 32x18, `size` = 16x9, `offset` = 16,9
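///
/// For example, camera D's sub view (using the reduced values) could be
/// constructed like this:
/// ```
/// # use bevy_camera::SubCameraView;
/// # use bevy_math::{UVec2, Vec2};
/// let camera_d_view = SubCameraView {
///     full_size: UVec2::new(32, 18),
///     offset: Vec2::new(16., 9.),
///     size: UVec2::new(16, 9),
/// };
/// ```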
#[derive(Debug, Clone, Copy, Reflect, PartialEq)]
#[reflect(Clone, PartialEq, Default)]
pub struct SubCameraView {
    /// Size of the entire camera view
    pub full_size: UVec2,
    /// Offset of the sub camera view within the full view
    pub offset: Vec2,
    /// Size of the sub camera view
    pub size: UVec2,
}

impl Default for SubCameraView {
    fn default() -> Self {
        Self {
            full_size: UVec2::new(1, 1),
            offset: Vec2::new(0., 0.),
            size: UVec2::new(1, 1),
        }
    }
}

/// Information about the current [`RenderTarget`].
#[derive(Debug, Clone)]
pub struct RenderTargetInfo {
    /// The physical size of this render target (in physical pixels, ignoring scale factor).
    pub physical_size: UVec2,
    /// The scale factor of this render target.
    ///
    /// When rendering to a window, this is typically a value greater than or equal to 1.0,
    /// representing the ratio between the size of the window in physical pixels and the logical size of the window.
    pub scale_factor: f32,
}

impl Default for RenderTargetInfo {
    fn default() -> Self {
        Self {
            physical_size: Default::default(),
            scale_factor: 1.,
        }
    }
}

/// Holds internally computed [`Camera`] values.
#[derive(Default, Debug, Clone)]
pub struct ComputedCameraValues {
    pub clip_from_view: Mat4,
    pub target_info: Option<RenderTargetInfo>,
    // size of the `Viewport`
    pub old_viewport_size: Option<UVec2>,
    pub old_sub_camera_view: Option<SubCameraView>,
}

/// How much energy a [`Camera3d`](crate::Camera3d) absorbs from incoming light.
///
/// <https://en.wikipedia.org/wiki/Exposure_(photography)>
#[derive(Component, Clone, Copy, Reflect)]
#[reflect(opaque)]
#[reflect(Component, Default, Clone)]
pub struct Exposure {
    /// <https://en.wikipedia.org/wiki/Exposure_value#Tabulated_exposure_values>
    pub ev100: f32,
}

impl Exposure {
    pub const SUNLIGHT: Self = Self {
        ev100: Self::EV100_SUNLIGHT,
    };
    pub const OVERCAST: Self = Self {
        ev100: Self::EV100_OVERCAST,
    };
    pub const INDOOR: Self = Self {
        ev100: Self::EV100_INDOOR,
    };
    /// This value was calibrated to match Blender's implicit/default exposure as closely as possible.
    /// It also happens to be a reasonable default.
    ///
    /// See <https://github.com/bevyengine/bevy/issues/11577> for details.
    pub const BLENDER: Self = Self {
        ev100: Self::EV100_BLENDER,
    };

    pub const EV100_SUNLIGHT: f32 = 15.0;
    pub const EV100_OVERCAST: f32 = 12.0;
    pub const EV100_INDOOR: f32 = 7.0;

    /// This value was calibrated to match Blender's implicit/default exposure as closely as possible.
    /// It also happens to be a reasonable default.
    ///
    /// See <https://github.com/bevyengine/bevy/issues/11577> for details.
    pub const EV100_BLENDER: f32 = 9.7;

    pub fn from_physical_camera(physical_camera_parameters: PhysicalCameraParameters) -> Self {
        Self {
            ev100: physical_camera_parameters.ev100(),
        }
    }

    /// Converts EV100 values to exposure values.
    /// <https://google.github.io/filament/Filament.md.html#imagingpipeline/physicallybasedcamera/exposure>
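    ///
    /// For example, at the Blender-calibrated default (`ev100 = 9.7`) this
    /// evaluates to `exp2(-9.7) / 1.2 ≈ 0.001`.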
    #[inline]
    pub fn exposure(&self) -> f32 {
        ops::exp2(-self.ev100) / 1.2
    }
}

impl Default for Exposure {
    fn default() -> Self {
        Self::BLENDER
    }
}

/// Parameters based on physical camera characteristics for calculating EV100
/// values for use with [`Exposure`]. This is also used for depth of field.
#[derive(Clone, Copy)]
pub struct PhysicalCameraParameters {
    /// <https://en.wikipedia.org/wiki/F-number>
    pub aperture_f_stops: f32,
    /// <https://en.wikipedia.org/wiki/Shutter_speed>
    pub shutter_speed_s: f32,
    /// <https://en.wikipedia.org/wiki/Film_speed>
    pub sensitivity_iso: f32,
    /// The height of the [image sensor format] in meters.
    ///
    /// Focal length is derived from the FOV and this value. The default is
    /// 18.66mm, matching the [Super 35] format, which is popular in cinema.
    ///
    /// [image sensor format]: https://en.wikipedia.org/wiki/Image_sensor_format
    ///
    /// [Super 35]: https://en.wikipedia.org/wiki/Super_35
    pub sensor_height: f32,
}

impl PhysicalCameraParameters {
    /// Calculate the [EV100](https://en.wikipedia.org/wiki/Exposure_value).
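    ///
    /// `EV100 = log2(N^2 * 100 / (t * S))`, where `N` is the aperture in
    /// f-stops, `t` the shutter speed in seconds, and `S` the ISO sensitivity.
    /// With the defaults (f/1.0, 1/125 s, ISO 100) this gives
    /// `log2(125) ≈ 6.97`.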
    pub fn ev100(&self) -> f32 {
        ops::log2(
            self.aperture_f_stops * self.aperture_f_stops * 100.0
                / (self.shutter_speed_s * self.sensitivity_iso),
        )
    }
}

impl Default for PhysicalCameraParameters {
    fn default() -> Self {
        Self {
            aperture_f_stops: 1.0,
            shutter_speed_s: 1.0 / 125.0,
            sensitivity_iso: 100.0,
            sensor_height: 0.01866,
        }
    }
}

/// Error returned when a conversion between world-space and viewport-space coordinates fails.
///
/// See [`world_to_viewport`][Camera::world_to_viewport] and [`viewport_to_world`][Camera::viewport_to_world].
#[derive(Debug, Eq, PartialEq, Copy, Clone, Error)]
pub enum ViewportConversionError {
    /// The pre-computed size of the viewport was not available.
    ///
    /// This may be because the `Camera` was just created and `camera_system` has not been executed
    /// yet, or because the [`RenderTarget`] is misconfigured in one of the following ways:
    ///   - it references the [`PrimaryWindow`](RenderTarget::Window) when there is none,
    ///   - it references a [`Window`](RenderTarget::Window) entity that doesn't exist or doesn't actually have a `Window` component,
    ///   - it references an [`Image`](RenderTarget::Image) that doesn't exist (invalid handle),
    ///   - it references a [`TextureView`](RenderTarget::TextureView) that doesn't exist (invalid handle).
    #[error("pre-computed size of viewport not available")]
    NoViewportSize,
    /// The computed coordinate was beyond the `Camera`'s near plane.
    ///
    /// Only applicable when converting from world-space to viewport-space.
    #[error("computed coordinate beyond `Camera`'s near plane")]
    PastNearPlane,
    /// The computed coordinate was beyond the `Camera`'s far plane.
    ///
    /// Only applicable when converting from world-space to viewport-space.
    #[error("computed coordinate beyond `Camera`'s far plane")]
    PastFarPlane,
    /// The Normalized Device Coordinates could not be computed because the `camera_transform`, the
    /// `world_position`, or the projection matrix defined by [`Projection`](super::projection::Projection)
    /// contained `NAN` (see [`world_to_ndc`][Camera::world_to_ndc] and [`ndc_to_world`][Camera::ndc_to_world]).
    #[error("found NaN while computing NDC")]
    InvalidData,
}

/// The defining [`Component`] for camera entities,
/// storing information about how and what to render through this camera.
///
/// The [`Camera`] component is added to an entity to define the properties of the viewpoint from
/// which rendering occurs. It defines the position of the view to render, the projection method
/// to transform the 3D objects into a 2D image, as well as the render target into which that image
/// is produced.
///
/// Note that a [`Camera`] needs a `CameraRenderGraph` to render anything.
/// This is typically provided by adding a [`Camera2d`] or [`Camera3d`] component,
/// but custom render graphs can also be defined. Inserting a [`Camera`] with no render
/// graph will emit an error at runtime.
///
/// [`Camera2d`]: crate::Camera2d
/// [`Camera3d`]: crate::Camera3d
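///
/// # Example
/// ```no_run
/// # use bevy_ecs::prelude::*;
/// # use bevy_camera::{Camera, Camera3d};
/// // A sketch: spawn a secondary 3d camera that renders on top of the default
/// // camera (which has order 0).
/// fn setup(mut commands: Commands) {
///     commands.spawn((
///         Camera3d::default(),
///         Camera {
///             order: 1,
///             ..Default::default()
///         },
///     ));
/// }
/// ```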
#[derive(Component, Debug, Reflect, Clone)]
#[reflect(Component, Default, Debug, Clone)]
#[require(
    Frustum,
    CameraMainTextureUsages,
    VisibleEntities,
    Transform,
    Visibility
)]
pub struct Camera {
    /// If set, this camera will render to the given [`Viewport`] rectangle within the configured [`RenderTarget`].
    pub viewport: Option<Viewport>,
    /// Cameras with a higher order are rendered later, and thus on top of lower order cameras.
    pub order: isize,
    /// If this is set to `true`, this camera will be rendered to its specified [`RenderTarget`]. If `false`, this
    /// camera will not be rendered.
    pub is_active: bool,
    /// Computed values for this camera, such as the projection matrix and the render target size.
    #[reflect(ignore, clone)]
    pub computed: ComputedCameraValues,
    /// The "target" that this camera will render to.
    pub target: RenderTarget,
    // todo: reflect this when #6042 lands
    /// The [`CameraOutputMode`] for this camera.
    #[reflect(ignore, clone)]
    pub output_mode: CameraOutputMode,
    /// If this is enabled, a previous camera shares this camera's render target, and this camera has
    /// MSAA enabled, then the previous camera's outputs will be written to the intermediate
    /// multi-sampled render target textures for this camera. This enables cameras with MSAA enabled to
    /// "write their results on top" of previous camera results, and include them as a part of their
    /// render results. This is enabled by default to ensure cameras with MSAA layer their results in
    /// the same way as cameras without MSAA.
    pub msaa_writeback: bool,
    /// The clear color operation to perform on the render target.
    pub clear_color: ClearColorConfig,
    /// If set, this camera will be a sub camera of a large view, defined by a [`SubCameraView`].
    pub sub_camera_view: Option<SubCameraView>,
}

impl Default for Camera {
    fn default() -> Self {
        Self {
            is_active: true,
            order: 0,
            viewport: None,
            computed: Default::default(),
            target: Default::default(),
            output_mode: Default::default(),
            msaa_writeback: true,
            clear_color: Default::default(),
            sub_camera_view: None,
        }
    }
}

impl Camera {
    /// Converts a physical size in this `Camera` to a logical size.
    #[inline]
    pub fn to_logical(&self, physical_size: UVec2) -> Option<Vec2> {
        let scale = self.computed.target_info.as_ref()?.scale_factor;
        Some(physical_size.as_vec2() / scale)
    }

    /// The rendered physical bounds [`URect`] of the camera. If the `viewport` field is
    /// set to [`Some`], this will be the rect of that custom viewport. Otherwise it will default to
    /// the full physical rect of the current [`RenderTarget`].
    #[inline]
    pub fn physical_viewport_rect(&self) -> Option<URect> {
        let min = self
            .viewport
            .as_ref()
            .map(|v| v.physical_position)
            .unwrap_or(UVec2::ZERO);
        let max = min + self.physical_viewport_size()?;
        Some(URect { min, max })
    }

    /// The rendered logical bounds [`Rect`] of the camera. If the `viewport` field is set to
    /// [`Some`], this will be the rect of that custom viewport. Otherwise it will default to the
    /// full logical rect of the current [`RenderTarget`].
    #[inline]
    pub fn logical_viewport_rect(&self) -> Option<Rect> {
        let URect { min, max } = self.physical_viewport_rect()?;
        Some(Rect {
            min: self.to_logical(min)?,
            max: self.to_logical(max)?,
        })
    }

    /// The logical size of this camera's viewport. If the `viewport` field is set to [`Some`], this
    /// will be the size of that custom viewport. Otherwise it will default to the full logical size
    /// of the current [`RenderTarget`].
    /// For logic that requires the full logical size of the
    /// [`RenderTarget`], prefer [`Camera::logical_target_size`].
    ///
    /// Returns `None` if either:
    /// - the function is called just after the `Camera` is created, before `camera_system` is executed,
    /// - the [`RenderTarget`] isn't correctly set:
    ///   - it references the [`PrimaryWindow`](RenderTarget::Window) when there is none,
    ///   - it references a [`Window`](RenderTarget::Window) entity that doesn't exist or doesn't actually have a `Window` component,
    ///   - it references an [`Image`](RenderTarget::Image) that doesn't exist (invalid handle),
    ///   - it references a [`TextureView`](RenderTarget::TextureView) that doesn't exist (invalid handle).
    #[inline]
    pub fn logical_viewport_size(&self) -> Option<Vec2> {
        self.viewport
            .as_ref()
            .and_then(|v| self.to_logical(v.physical_size))
            .or_else(|| self.logical_target_size())
    }

    /// The physical size of this camera's viewport (in physical pixels).
    /// If the `viewport` field is set to [`Some`], this
    /// will be the size of that custom viewport. Otherwise it will default to the full physical size of
    /// the current [`RenderTarget`].
    /// For logic that requires the full physical size of the [`RenderTarget`], prefer [`Camera::physical_target_size`].
    #[inline]
    pub fn physical_viewport_size(&self) -> Option<UVec2> {
        self.viewport
            .as_ref()
            .map(|v| v.physical_size)
            .or_else(|| self.physical_target_size())
    }

    /// The full logical size of this camera's [`RenderTarget`], ignoring custom `viewport` configuration.
    /// Note that if the `viewport` field is [`Some`], this will not represent the size of the rendered area.
    /// For logic that requires the size of the actually rendered area, prefer [`Camera::logical_viewport_size`].
    #[inline]
    pub fn logical_target_size(&self) -> Option<Vec2> {
        self.computed
            .target_info
            .as_ref()
            .and_then(|t| self.to_logical(t.physical_size))
    }

    /// The full physical size of this camera's [`RenderTarget`] (in physical pixels),
    /// ignoring custom `viewport` configuration.
    /// Note that if the `viewport` field is [`Some`], this will not represent the size of the rendered area.
    /// For logic that requires the size of the actually rendered area, prefer [`Camera::physical_viewport_size`].
    #[inline]
    pub fn physical_target_size(&self) -> Option<UVec2> {
        self.computed.target_info.as_ref().map(|t| t.physical_size)
    }

    /// The scale factor of this camera's [`RenderTarget`].
    #[inline]
    pub fn target_scaling_factor(&self) -> Option<f32> {
        self.computed
            .target_info
            .as_ref()
            .map(|t: &RenderTargetInfo| t.scale_factor)
    }

    /// The projection matrix computed using this camera's [`Projection`](super::projection::Projection).
    #[inline]
    pub fn clip_from_view(&self) -> Mat4 {
        self.computed.clip_from_view
    }

    /// Given a position in world space, use the camera to compute the viewport-space coordinates.
    ///
    /// To get the coordinates in Normalized Device Coordinates, you should use
    /// [`world_to_ndc`](Self::world_to_ndc).
    ///
    /// # Panics
    ///
    /// Will panic if `glam_assert` is enabled and the `camera_transform` contains `NAN`
    /// (see [`world_to_ndc`][Self::world_to_ndc]).
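    ///
    /// # Example
    /// ```no_run
    /// # use bevy_ecs::prelude::*;
    /// # use bevy_transform::prelude::GlobalTransform;
    /// # use bevy_camera::Camera;
    /// # use bevy_math::Vec3;
    /// // A minimal sketch: project the world-space origin into viewport coordinates.
    /// fn system(camera_query: Single<(&Camera, &GlobalTransform)>) {
    ///     let (camera, camera_transform) = *camera_query;
    ///     if let Ok(viewport_position) = camera.world_to_viewport(camera_transform, Vec3::ZERO) {
    ///         println!("{viewport_position}");
    ///     }
    /// }
    /// ```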
    #[doc(alias = "world_to_screen")]
    pub fn world_to_viewport(
        &self,
        camera_transform: &GlobalTransform,
        world_position: Vec3,
    ) -> Result<Vec2, ViewportConversionError> {
        let target_rect = self
            .logical_viewport_rect()
            .ok_or(ViewportConversionError::NoViewportSize)?;
        let mut ndc_space_coords = self
            .world_to_ndc(camera_transform, world_position)
            .ok_or(ViewportConversionError::InvalidData)?;
        // NDC z-values outside of 0 < z < 1 are outside the (implicit) camera frustum and are thus not in viewport-space
        if ndc_space_coords.z < 0.0 {
            return Err(ViewportConversionError::PastFarPlane);
        }
        if ndc_space_coords.z > 1.0 {
            return Err(ViewportConversionError::PastNearPlane);
        }

        // Flip the Y co-ordinate origin from the bottom to the top.
        ndc_space_coords.y = -ndc_space_coords.y;

        // Once in NDC space, we can discard the z element and map x/y to the viewport rect
        let viewport_position =
            (ndc_space_coords.truncate() + Vec2::ONE) / 2.0 * target_rect.size() + target_rect.min;
        Ok(viewport_position)
    }

    /// Given a position in world space, use the camera to compute the viewport-space coordinates and depth.
    ///
    /// To get the coordinates in Normalized Device Coordinates, you should use
    /// [`world_to_ndc`](Self::world_to_ndc).
    ///
    /// # Panics
    ///
    /// Will panic if `glam_assert` is enabled and the `camera_transform` contains `NAN`
    /// (see [`world_to_ndc`][Self::world_to_ndc]).
    #[doc(alias = "world_to_screen_with_depth")]
    pub fn world_to_viewport_with_depth(
        &self,
        camera_transform: &GlobalTransform,
        world_position: Vec3,
    ) -> Result<Vec3, ViewportConversionError> {
        let target_rect = self
            .logical_viewport_rect()
            .ok_or(ViewportConversionError::NoViewportSize)?;
        let mut ndc_space_coords = self
            .world_to_ndc(camera_transform, world_position)
            .ok_or(ViewportConversionError::InvalidData)?;
        // NDC z-values outside of 0 < z < 1 are outside the (implicit) camera frustum and are thus not in viewport-space
        if ndc_space_coords.z < 0.0 {
            return Err(ViewportConversionError::PastFarPlane);
        }
        if ndc_space_coords.z > 1.0 {
            return Err(ViewportConversionError::PastNearPlane);
        }

        // Stretch the NDC depth to a view-space value via the near plane, then negate the
        // result so that depth in front of the camera is positive again.
        let depth = -self.depth_ndc_to_view_z(ndc_space_coords.z);

        // Flip the Y co-ordinate origin from the bottom to the top.
        ndc_space_coords.y = -ndc_space_coords.y;

        // Once in NDC space, we can discard the z element and map x/y to the viewport rect
        let viewport_position =
            (ndc_space_coords.truncate() + Vec2::ONE) / 2.0 * target_rect.size() + target_rect.min;
        Ok(viewport_position.extend(depth))
    }

    /// Returns a ray originating from the camera, that passes through everything beyond `viewport_position`.
    ///
    /// The resulting ray starts on the near plane of the camera.
    ///
    /// If the camera's projection is orthographic the direction of the ray is always equal to `camera_transform.forward()`.
    ///
    /// To get the world space coordinates with Normalized Device Coordinates, you should use
    /// [`ndc_to_world`](Self::ndc_to_world).
    ///
    /// # Example
    /// ```no_run
    /// # use bevy_window::Window;
    /// # use bevy_ecs::prelude::{Single, IntoScheduleConfigs};
    /// # use bevy_transform::prelude::{GlobalTransform, TransformSystems};
    /// # use bevy_camera::Camera;
    /// # use bevy_app::{App, PostUpdate};
    /// #
    /// fn system(camera_query: Single<(&Camera, &GlobalTransform)>, window: Single<&Window>) {
    ///     let (camera, camera_transform) = *camera_query;
    ///
    ///     if let Some(cursor_position) = window.cursor_position()
    ///         // Calculate a ray pointing from the camera into the world based on the cursor's position.
    ///         && let Ok(ray) = camera.viewport_to_world(camera_transform, cursor_position)
    ///     {
    ///         println!("{ray:?}");
    ///     }
    /// }
    ///
    /// # let mut app = App::new();
    /// // Run the system after transform propagation so the camera's global transform is up-to-date.
    /// app.add_systems(PostUpdate, system.after(TransformSystems::Propagate));
    /// ```
    ///
    /// # Panics
    ///
    /// Will panic if the camera's projection matrix is invalid (has a determinant of 0) and
    /// `glam_assert` is enabled (see [`ndc_to_world`](Self::ndc_to_world)).
    pub fn viewport_to_world(
        &self,
        camera_transform: &GlobalTransform,
        viewport_position: Vec2,
    ) -> Result<Ray3d, ViewportConversionError> {
        let target_rect = self
            .logical_viewport_rect()
            .ok_or(ViewportConversionError::NoViewportSize)?;
        let rect_relative = (viewport_position - target_rect.min) / target_rect.size();
        let mut ndc_xy = rect_relative * 2. - Vec2::ONE;
        // Flip the Y co-ordinate from the top to the bottom to enter NDC.
        ndc_xy.y = -ndc_xy.y;

        let ndc_point_near = ndc_xy.extend(1.0).into();
        // Using EPSILON because an NDC with Z = 0 returns NaNs.
        let ndc_point_far = ndc_xy.extend(f32::EPSILON).into();
        let view_from_clip = self.computed.clip_from_view.inverse();
        let world_from_view = camera_transform.affine();
        // We multiply the point by `view_from_clip` and then `world_from_view` in sequence to avoid the precision loss
        // (and performance penalty) incurred by pre-composing an affine transform with a projective transform.
        // Additionally, we avoid adding and subtracting translation to the direction component to maintain precision.
        let view_point_near = view_from_clip.project_point3a(ndc_point_near);
        let view_point_far = view_from_clip.project_point3a(ndc_point_far);
        let view_dir = view_point_far - view_point_near;
        let origin = world_from_view.transform_point3a(view_point_near).into();
        let direction = world_from_view.transform_vector3a(view_dir).into();

        // The fallible direction constructor ensures that direction isn't NaN.
        Dir3::new(direction)
            .map_err(|_| ViewportConversionError::InvalidData)
            .map(|direction| Ray3d { origin, direction })
    }

    /// Returns a 2D world position computed from a position on this [`Camera`]'s viewport.
    ///
    /// Useful for 2D cameras and other cameras with an orthographic projection pointing along the Z axis.
    ///
    /// To get the world space coordinates with Normalized Device Coordinates, you should use
    /// [`ndc_to_world`](Self::ndc_to_world).
    ///
    /// # Example
    /// ```no_run
    /// # use bevy_window::Window;
    /// # use bevy_ecs::prelude::*;
    /// # use bevy_transform::prelude::{GlobalTransform, TransformSystems};
    /// # use bevy_camera::Camera;
    /// # use bevy_app::{App, PostUpdate};
    /// #
    /// fn system(camera_query: Single<(&Camera, &GlobalTransform)>, window: Single<&Window>) {
    ///     let (camera, camera_transform) = *camera_query;
    ///
    ///     if let Some(cursor_position) = window.cursor_position()
    ///         // Calculate a world position based on the cursor's position.
    ///         && let Ok(world_pos) = camera.viewport_to_world_2d(camera_transform, cursor_position)
    ///     {
    ///         println!("World position: {world_pos:.2}");
    ///     }
    /// }
    ///
    /// # let mut app = App::new();
    /// // Run the system after transform propagation so the camera's global transform is up-to-date.
    /// app.add_systems(PostUpdate, system.after(TransformSystems::Propagate));
    /// ```
    ///
    /// # Panics
    ///
    /// Will panic if the camera's projection matrix is invalid (has a determinant of 0) and
    /// `glam_assert` is enabled (see [`ndc_to_world`](Self::ndc_to_world)).
    pub fn viewport_to_world_2d(
        &self,
        camera_transform: &GlobalTransform,
        viewport_position: Vec2,
    ) -> Result<Vec2, ViewportConversionError> {
        let target_rect = self
            .logical_viewport_rect()
            .ok_or(ViewportConversionError::NoViewportSize)?;
        let mut rect_relative = (viewport_position - target_rect.min) / target_rect.size();

        // Flip the Y co-ordinate origin from the top to the bottom.
        rect_relative.y = 1.0 - rect_relative.y;

        let ndc = rect_relative * 2. - Vec2::ONE;

        let world_near_plane = self
            .ndc_to_world(camera_transform, ndc.extend(1.))
            .ok_or(ViewportConversionError::InvalidData)?;

        Ok(world_near_plane.truncate())
    }

    /// Given a point in world space, use the camera's viewport to compute the Normalized Device Coordinates of the point.
    ///
    /// When the point is within the viewport the values returned will be between -1.0 (bottom left) and 1.0 (top right)
    /// on the X and Y axes, and between 0.0 (far) and 1.0 (near) on the Z axis.
    /// To get the coordinates in the render target's viewport dimensions, you should use
    /// [`world_to_viewport`](Self::world_to_viewport).
    ///
    /// Returns `None` if the `camera_transform`, the `world_point`, or the projection matrix defined by
    /// [`Projection`](super::projection::Projection) contain `NAN`.
    ///
    /// # Panics
    ///
    /// Will panic if the `camera_transform` contains `NAN` and the `glam_assert` feature is enabled.
    pub fn world_to_ndc<V: Into<Vec3A> + From<Vec3A>>(
        &self,
        camera_transform: &GlobalTransform,
        world_point: V,
    ) -> Option<V> {
        let view_from_world = camera_transform.affine().inverse();
        let view_point = view_from_world.transform_point3a(world_point.into());
        let ndc_point = self.computed.clip_from_view.project_point3a(view_point);

        (!ndc_point.is_nan()).then_some(ndc_point.into())
    }

    /// Given a position in Normalized Device Coordinates,
    /// use the camera's viewport to compute the world space position.
    ///
    /// When the position is within the viewport the input values should be between -1.0 and 1.0 on the X and Y axes,
    /// and between 0.0 and 1.0 on the Z axis.
    /// To get the world space coordinates with the viewport position, you should use
    /// [`world_to_viewport`](Self::world_to_viewport).
    ///
    /// Returns `None` if the `camera_transform`, the `ndc_point`, or the projection matrix defined by
    /// [`Projection`](super::projection::Projection) contain `NAN`.
    ///
    /// # Panics
    ///
    /// Will panic if the projection matrix is invalid (has a determinant of 0) and `glam_assert` is enabled.
    pub fn ndc_to_world<V: Into<Vec3A> + From<Vec3A>>(
        &self,
        camera_transform: &GlobalTransform,
        ndc_point: V,
    ) -> Option<V> {
        // We multiply the point by `view_from_clip` and then `world_from_view` in sequence to avoid the precision loss
        // (and performance penalty) incurred by pre-composing an affine transform with a projective transform.
        let view_point = self
            .computed
            .clip_from_view
            .inverse()
            .project_point3a(ndc_point.into());
        let world_point = camera_transform.affine().transform_point3a(view_point);

        (!world_point.is_nan()).then_some(world_point.into())
    }

    /// Converts the depth in Normalized Device Coordinates
    /// to linear view z for perspective projections.
    ///
    /// Note: Depth values in front of the camera will be negative as -z is forward
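    ///
    /// This assumes an infinite reverse-Z perspective projection (Bevy's
    /// default for 3d), where `clip_from_view.w_axis.z` holds the near-plane
    /// distance, so `view_z = -near / ndc_depth`.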
    pub fn depth_ndc_to_view_z(&self, ndc_depth: f32) -> f32 {
        let near = self.clip_from_view().w_axis.z; // [3][2]
        -near / ndc_depth
    }

    /// Converts the depth in Normalized Device Coordinates
    /// to linear view z for orthographic projections.
    ///
    /// Note: Depth values in front of the camera will be negative as -z is forward
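    ///
    /// This assumes an orthographic projection, where depth is linear:
    /// `ndc_depth = view_z * clip_from_view[2][2] + clip_from_view[3][2]`,
    /// which is solved here for `view_z`.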
    pub fn depth_ndc_to_view_z_2d(&self, ndc_depth: f32) -> f32 {
        -(self.clip_from_view().w_axis.z - ndc_depth) / self.clip_from_view().z_axis.z
        //                       [3][2]                                         [2][2]
    }
}

/// Control how this [`Camera`] outputs once rendering is completed.
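///
/// For example, a camera that alpha-blends its output over what is already in
/// the render target (a sketch; the blend state choice is illustrative):
/// ```
/// # use bevy_camera::{CameraOutputMode, ClearColorConfig};
/// # use wgpu_types::BlendState;
/// let output_mode = CameraOutputMode::Write {
///     blend_state: Some(BlendState::ALPHA_BLENDING),
///     clear_color: ClearColorConfig::None,
/// };
/// ```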
#[derive(Debug, Clone, Copy)]
pub enum CameraOutputMode {
    /// Writes the camera output to the configured render target.
    Write {
        /// The blend state that will be used by the pipeline that writes the intermediate render textures to the final render target texture.
        /// If not set, the output will be written as-is, ignoring `clear_color` and the existing data in the final render target texture.
        blend_state: Option<BlendState>,
        /// The clear color operation to perform on the final render target texture.
        clear_color: ClearColorConfig,
    },
    /// Skips writing the camera output to the configured render target. The output will remain in the
    /// render target's "intermediate" textures, which a camera with a higher order should then write to the
    /// render target using [`CameraOutputMode::Write`]. The "skip" mode can easily prevent render results from
    /// being displayed, or cause them to be lost, so only use it if you know what you are doing!
    /// In camera setups with multiple active cameras rendering to the same [`RenderTarget`], the skip mode can
    /// be used to remove unnecessary / redundant writes to the final output texture, removing unnecessary render passes.
    Skip,
}

impl Default for CameraOutputMode {
    fn default() -> Self {
        CameraOutputMode::Write {
            blend_state: None,
            clear_color: ClearColorConfig::Default,
        }
    }
}

/// The "target" that a [`Camera`] will render to. For example, this could be a `Window`
/// swapchain or an [`Image`].
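///
/// For example, a camera rendering to an [`Image`] instead of the window
/// (a sketch; the handle is assumed to point at a valid render-target image):
/// ```
/// # use bevy_asset::Handle;
/// # use bevy_camera::{Camera, RenderTarget};
/// # use bevy_image::Image;
/// fn camera_for_image(image: Handle<Image>) -> Camera {
///     Camera {
///         target: RenderTarget::from(image),
///         ..Default::default()
///     }
/// }
/// ```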
#[derive(Debug, Clone, Reflect, From)]
#[reflect(Clone)]
pub enum RenderTarget {
    /// Window to which the camera's view is rendered.
    Window(WindowRef),
    /// Image to which the camera's view is rendered.
    Image(ImageRenderTarget),
    /// Texture View to which the camera's view is rendered.
    /// Useful when the texture view needs to be created outside of Bevy, for example OpenXR.
    TextureView(ManualTextureViewHandle),
    /// The camera won't render to any color target.
    ///
    /// This is useful when you want a camera that *only* renders prepasses, for
    /// example a depth prepass. See the `render_depth_to_texture` example.
    None {
        /// The physical size of the viewport.
        size: UVec2,
    },
}

impl RenderTarget {
    /// Get a handle to the render target's image,
    /// or `None` if the render target is another variant.
    pub fn as_image(&self) -> Option<&Handle<Image>> {
        if let Self::Image(image_target) = self {
            Some(&image_target.handle)
        } else {
            None
        }
    }

    /// Normalize the render target down to a more concrete value, mostly used for equality comparisons.
    pub fn normalize(&self, primary_window: Option<Entity>) -> Option<NormalizedRenderTarget> {
        match self {
            RenderTarget::Window(window_ref) => window_ref
                .normalize(primary_window)
                .map(NormalizedRenderTarget::Window),
            RenderTarget::Image(handle) => Some(NormalizedRenderTarget::Image(handle.clone())),
            RenderTarget::TextureView(id) => Some(NormalizedRenderTarget::TextureView(*id)),
            RenderTarget::None { size } => Some(NormalizedRenderTarget::None {
                width: size.x,
                height: size.y,
            }),
        }
    }
}

/// Normalized version of the render target.
///
/// Once we have this we shouldn't need to resolve it down anymore.
#[derive(Debug, Clone, Reflect, PartialEq, Eq, Hash, PartialOrd, Ord, From)]
#[reflect(Clone, PartialEq, Hash)]
pub enum NormalizedRenderTarget {
    /// Window to which the camera's view is rendered.
    Window(NormalizedWindowRef),
    /// Image to which the camera's view is rendered.
    Image(ImageRenderTarget),
    /// Texture View to which the camera's view is rendered.
    /// Useful when the texture view needs to be created outside of Bevy, for example OpenXR.
    TextureView(ManualTextureViewHandle),
    /// The camera won't render to any color target.
    ///
    /// This is useful when you want a camera that *only* renders prepasses, for
    /// example a depth prepass. See the `render_depth_to_texture` example.
    None {
        /// The physical width of the viewport.
        width: u32,
        /// The physical height of the viewport.
        height: u32,
    },
}

/// A unique id that corresponds to a specific `ManualTextureView` in the `ManualTextureViews` collection.
///
/// See `ManualTextureViews` in `bevy_camera` for more details.
#[derive(Default, Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord, Component, Reflect)]
#[reflect(Component, Default, Debug, PartialEq, Hash, Clone)]
pub struct ManualTextureViewHandle(pub u32);

/// A render target that renders to an [`Image`].
#[derive(Debug, Clone, Reflect, PartialEq, Eq, Hash, PartialOrd, Ord)]
#[reflect(Clone, PartialEq, Hash)]
pub struct ImageRenderTarget {
    /// The image to render to.
    pub handle: Handle<Image>,
    /// The scale factor of the render target image, corresponding to the scale
    /// factor for a window target. This should almost always be 1.0.
    pub scale_factor: FloatOrd,
}

impl From<Handle<Image>> for RenderTarget {
    fn from(handle: Handle<Image>) -> Self {
        Self::Image(handle.into())
    }
}

impl From<Handle<Image>> for ImageRenderTarget {
    fn from(handle: Handle<Image>) -> Self {
        Self {
            handle,
            scale_factor: FloatOrd(1.0),
        }
    }
}

impl Default for RenderTarget {
    fn default() -> Self {
        Self::Window(Default::default())
    }
}

/// This component lets you control the [`TextureUsages`] field of the main texture generated for the camera.
#[derive(Component, Clone, Copy, Reflect)]
#[reflect(opaque)]
#[reflect(Component, Default, Clone)]
pub struct CameraMainTextureUsages(pub TextureUsages);

impl Default for CameraMainTextureUsages {
    fn default() -> Self {
        Self(
            TextureUsages::RENDER_ATTACHMENT
                | TextureUsages::TEXTURE_BINDING
                | TextureUsages::COPY_SRC,
        )
    }
}

impl CameraMainTextureUsages {
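    /// Returns `self` with the given additional [`TextureUsages`] enabled.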
    pub fn with(mut self, usages: TextureUsages) -> Self {
        self.0 |= usages;
        self
    }
}

#[cfg(test)]
mod test {
    use bevy_math::{Vec2, Vec3};
    use bevy_transform::components::GlobalTransform;

    use crate::{
        Camera, OrthographicProjection, PerspectiveProjection, Projection, RenderTargetInfo,
        Viewport,
    };

    fn make_camera(mut projection: Projection, physical_size: Vec2) -> Camera {
        let viewport = Viewport {
            physical_size: physical_size.as_uvec2(),
            ..Default::default()
        };
        let mut camera = Camera {
            viewport: Some(viewport.clone()),
            ..Default::default()
        };
        camera.computed.target_info = Some(RenderTargetInfo {
            physical_size: viewport.physical_size,
            scale_factor: 1.0,
        });
        projection.update(
            viewport.physical_size.x as f32,
            viewport.physical_size.y as f32,
        );
        camera.computed.clip_from_view = projection.get_clip_from_view();
        camera
    }

    #[test]
    fn viewport_to_world_orthographic_3d_returns_forward() {
        let transform = GlobalTransform::default();
        let size = Vec2::new(1600.0, 900.0);
        let camera = make_camera(
            Projection::Orthographic(OrthographicProjection::default_3d()),
            size,
        );
        let ray = camera.viewport_to_world(&transform, Vec2::ZERO).unwrap();
        assert_eq!(ray.direction, transform.forward());
        assert!(ray
            .origin
            .abs_diff_eq(Vec3::new(-size.x * 0.5, size.y * 0.5, 0.0), 1e-4));
        let ray = camera.viewport_to_world(&transform, size).unwrap();
        assert_eq!(ray.direction, transform.forward());
        assert!(ray
            .origin
            .abs_diff_eq(Vec3::new(size.x * 0.5, -size.y * 0.5, 0.0), 1e-4));
    }

    #[test]
    fn viewport_to_world_orthographic_2d_returns_forward() {
        let transform = GlobalTransform::default();
        let size = Vec2::new(1600.0, 900.0);
        let camera = make_camera(
            Projection::Orthographic(OrthographicProjection::default_2d()),
            size,
        );
        let ray = camera.viewport_to_world(&transform, Vec2::ZERO).unwrap();
        assert_eq!(ray.direction, transform.forward());
        assert!(ray
            .origin
            .abs_diff_eq(Vec3::new(-size.x * 0.5, size.y * 0.5, 1000.0), 1e-4));
        let ray = camera.viewport_to_world(&transform, size).unwrap();
        assert_eq!(ray.direction, transform.forward());
        assert!(ray
            .origin
            .abs_diff_eq(Vec3::new(size.x * 0.5, -size.y * 0.5, 1000.0), 1e-4));
    }

    #[test]
    fn viewport_to_world_perspective_center_returns_forward() {
        let transform = GlobalTransform::default();
        let size = Vec2::new(1600.0, 900.0);
        let camera = make_camera(
            Projection::Perspective(PerspectiveProjection::default()),
            size,
        );
        let ray = camera.viewport_to_world(&transform, size * 0.5).unwrap();
        assert_eq!(ray.direction, transform.forward());
        assert_eq!(ray.origin, transform.forward() * 0.1);
    }
}