bevy_camera/camera.rs
use crate::primitives::Frustum;

use super::{
    visibility::{Visibility, VisibleEntities},
    ClearColorConfig, MsaaWriteback,
};
use bevy_asset::Handle;
use bevy_derive::Deref;
use bevy_ecs::{component::Component, entity::Entity, reflect::ReflectComponent};
use bevy_image::Image;
use bevy_math::{ops, Dir3, FloatOrd, Mat4, Ray3d, Rect, URect, UVec2, Vec2, Vec3, Vec3A};
use bevy_reflect::prelude::*;
use bevy_transform::components::{GlobalTransform, Transform};
use bevy_window::{NormalizedWindowRef, WindowRef};
use core::ops::Range;
use derive_more::derive::From;
use thiserror::Error;
use wgpu_types::{BlendState, TextureUsages};

/// Render viewport configuration for the [`Camera`] component.
///
/// The viewport defines the area on the render target to which the camera renders its image.
/// You can overlay multiple cameras in a single window using viewports to create effects like
/// split screen, minimaps, and character viewers.
#[derive(Reflect, Debug, Clone)]
#[reflect(Default, Clone)]
pub struct Viewport {
    /// The physical position to render this viewport to within the [`RenderTarget`] of this [`Camera`].
    /// (0,0) corresponds to the top-left corner.
    pub physical_position: UVec2,
    /// The physical size of the viewport rectangle to render to within the [`RenderTarget`] of this [`Camera`].
    /// The origin of the rectangle is in the top-left corner.
    pub physical_size: UVec2,
    /// The minimum and maximum depth to render (on a scale from 0.0 to 1.0).
    pub depth: Range<f32>,
}

impl Default for Viewport {
    fn default() -> Self {
        Self {
            physical_position: Default::default(),
            physical_size: UVec2::new(1, 1),
            depth: 0.0..1.0,
        }
    }
}

impl Viewport {
    /// Cut the viewport rectangle so that it lies inside a rectangle of the
    /// given size.
    ///
    /// If either of the viewport's position coordinates lies outside the given
    /// dimensions, it will be moved just inside first. If either of the given
    /// dimensions is zero, the position and size of the viewport rectangle will
    /// both be set to zero in that dimension.
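    ///
    /// # Example
    ///
    /// A sketch of the clamping behavior, with values assumed for illustration:
    ///
    /// ```
    /// # use bevy_camera::Viewport;
    /// # use bevy_math::UVec2;
    /// let mut viewport = Viewport {
    ///     physical_position: UVec2::new(100, 100),
    ///     physical_size: UVec2::new(200, 200),
    ///     ..Default::default()
    /// };
    /// // A 150x150 target cannot contain the viewport, so its size is cut to 50x50.
    /// viewport.clamp_to_size(UVec2::new(150, 150));
    /// assert_eq!(viewport.physical_position, UVec2::new(100, 100));
    /// assert_eq!(viewport.physical_size, UVec2::new(50, 50));
    /// ```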
    pub fn clamp_to_size(&mut self, size: UVec2) {
        // If the origin of the viewport rect is outside, then adjust so that
        // it's just barely inside. Then, cut off the part that is outside.
        if self.physical_size.x + self.physical_position.x > size.x {
            if self.physical_position.x < size.x {
                self.physical_size.x = size.x - self.physical_position.x;
            } else if size.x > 0 {
                self.physical_position.x = size.x - 1;
                self.physical_size.x = 1;
            } else {
                self.physical_position.x = 0;
                self.physical_size.x = 0;
            }
        }
        if self.physical_size.y + self.physical_position.y > size.y {
            if self.physical_position.y < size.y {
                self.physical_size.y = size.y - self.physical_position.y;
            } else if size.y > 0 {
                self.physical_position.y = size.y - 1;
                self.physical_size.y = 1;
            } else {
                self.physical_position.y = 0;
                self.physical_size.y = 0;
            }
        }
    }

    /// Returns the viewport, with its size replaced by the
    /// [`MainPassResolutionOverride`] if one is present.
    pub fn from_viewport_and_override(
        viewport: Option<&Self>,
        main_pass_resolution_override: Option<&MainPassResolutionOverride>,
    ) -> Option<Self> {
        if let Some(override_size) = main_pass_resolution_override {
            let mut vp = viewport.map_or_else(Self::default, Self::clone);
            vp.physical_size = **override_size;
            Some(vp)
        } else {
            viewport.cloned()
        }
    }
}

/// Override the resolution a 3d camera's main pass is rendered at.
///
/// Does not affect post processing.
///
/// ## Usage
///
/// * Insert this component on a 3d camera entity in the render world.
/// * The resolution override must be smaller than the camera's viewport size.
/// * The resolution override is specified in physical pixels.
/// * In shaders, use `View::main_pass_viewport` instead of `View::viewport`.
#[derive(Component, Reflect, Deref, Debug)]
#[reflect(Component)]
pub struct MainPassResolutionOverride(pub UVec2);

/// Settings to define a camera sub view.
///
/// When [`Camera::sub_camera_view`] is `Some`, only the sub-section of the
/// image defined by `size` and `offset` (relative to the `full_size` of the
/// whole image) is projected to the camera's viewport.
///
/// Take the example of the following multi-monitor setup:
/// ```css
/// ┌───┬───┐
/// │ A │ B │
/// ├───┼───┤
/// │ C │ D │
/// └───┴───┘
/// ```
/// If each monitor is 1920x1080, the whole image will have a resolution of
/// 3840x2160. For each monitor we can use a single camera with a viewport of
/// the same size as the monitor it corresponds to. To ensure that the image is
/// cohesive, we can use a different sub view on each camera:
/// - Camera A: `full_size` = 3840x2160, `size` = 1920x1080, `offset` = 0,0
/// - Camera B: `full_size` = 3840x2160, `size` = 1920x1080, `offset` = 1920,0
/// - Camera C: `full_size` = 3840x2160, `size` = 1920x1080, `offset` = 0,1080
/// - Camera D: `full_size` = 3840x2160, `size` = 1920x1080, `offset` = 1920,1080
///
/// However, since only the ratio between the values is important, they could all
/// be divided by 120 and still produce the same image. Camera D would for
/// example have the following values:
/// `full_size` = 32x18, `size` = 16x9, `offset` = 16,9
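///
/// # Example
///
/// A sketch of the sub view for camera D above, using the reduced values (the
/// surrounding viewport and window setup is assumed):
///
/// ```
/// # use bevy_camera::SubCameraView;
/// # use bevy_math::{UVec2, Vec2};
/// let sub_view = SubCameraView {
///     full_size: UVec2::new(32, 18),
///     offset: Vec2::new(16., 9.),
///     size: UVec2::new(16, 9),
/// };
/// ```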
#[derive(Debug, Clone, Copy, Reflect, PartialEq)]
#[reflect(Clone, PartialEq, Default)]
pub struct SubCameraView {
    /// Size of the entire camera view
    pub full_size: UVec2,
    /// Offset of the sub camera
    pub offset: Vec2,
    /// Size of the sub camera
    pub size: UVec2,
}

impl Default for SubCameraView {
    fn default() -> Self {
        Self {
            full_size: UVec2::new(1, 1),
            offset: Vec2::new(0., 0.),
            size: UVec2::new(1, 1),
        }
    }
}

/// Information about the current [`RenderTarget`].
#[derive(Debug, Clone)]
pub struct RenderTargetInfo {
    /// The physical size of this render target (in physical pixels, ignoring scale factor).
    pub physical_size: UVec2,
    /// The scale factor of this render target.
    ///
    /// When rendering to a window, this is typically a value greater than or equal to 1.0,
    /// representing the ratio between the size of the window in physical pixels and the logical size of the window.
    pub scale_factor: f32,
}

impl Default for RenderTargetInfo {
    fn default() -> Self {
        Self {
            physical_size: Default::default(),
            scale_factor: 1.,
        }
    }
}

/// Holds internally computed [`Camera`] values.
#[derive(Default, Debug, Clone)]
pub struct ComputedCameraValues {
    pub clip_from_view: Mat4,
    pub target_info: Option<RenderTargetInfo>,
    // size of the `Viewport`
    pub old_viewport_size: Option<UVec2>,
    pub old_sub_camera_view: Option<SubCameraView>,
}

/// How much energy a [`Camera3d`](crate::Camera3d) absorbs from incoming light.
///
/// <https://en.wikipedia.org/wiki/Exposure_(photography)>
#[derive(Component, Clone, Copy, Reflect)]
#[reflect(opaque)]
#[reflect(Component, Default, Clone)]
pub struct Exposure {
    /// <https://en.wikipedia.org/wiki/Exposure_value#Tabulated_exposure_values>
    pub ev100: f32,
}

impl Exposure {
    pub const SUNLIGHT: Self = Self {
        ev100: Self::EV100_SUNLIGHT,
    };
    pub const OVERCAST: Self = Self {
        ev100: Self::EV100_OVERCAST,
    };
    pub const INDOOR: Self = Self {
        ev100: Self::EV100_INDOOR,
    };
    /// This value was calibrated to match Blender's implicit/default exposure as closely as possible.
    /// It also happens to be a reasonable default.
    ///
    /// See <https://github.com/bevyengine/bevy/issues/11577> for details.
    pub const BLENDER: Self = Self {
        ev100: Self::EV100_BLENDER,
    };

    pub const EV100_SUNLIGHT: f32 = 15.0;
    pub const EV100_OVERCAST: f32 = 12.0;
    pub const EV100_INDOOR: f32 = 7.0;

    /// This value was calibrated to match Blender's implicit/default exposure as closely as possible.
    /// It also happens to be a reasonable default.
    ///
    /// See <https://github.com/bevyengine/bevy/issues/11577> for details.
    pub const EV100_BLENDER: f32 = 9.7;

    pub fn from_physical_camera(physical_camera_parameters: PhysicalCameraParameters) -> Self {
        Self {
            ev100: physical_camera_parameters.ev100(),
        }
    }

    /// Converts EV100 values to exposure values.
    /// <https://google.github.io/filament/Filament.md.html#imagingpipeline/physicallybasedcamera/exposure>
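    ///
    /// For example, [`Exposure::EV100_SUNLIGHT`] (15.0) yields an exposure of
    /// roughly `2^-15 / 1.2 ≈ 2.5e-5`.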
    #[inline]
    pub fn exposure(&self) -> f32 {
        ops::exp2(-self.ev100) / 1.2
    }
}

impl Default for Exposure {
    fn default() -> Self {
        Self::BLENDER
    }
}

/// Parameters based on physical camera characteristics for calculating EV100
/// values for use with [`Exposure`]. This is also used for depth of field.
#[derive(Clone, Copy)]
pub struct PhysicalCameraParameters {
    /// <https://en.wikipedia.org/wiki/F-number>
    pub aperture_f_stops: f32,
    /// <https://en.wikipedia.org/wiki/Shutter_speed>
    pub shutter_speed_s: f32,
    /// <https://en.wikipedia.org/wiki/Film_speed>
    pub sensitivity_iso: f32,
    /// The height of the [image sensor format] in meters.
    ///
    /// Focal length is derived from the FOV and this value. The default is
    /// 18.66mm, matching the [Super 35] format, which is popular in cinema.
    ///
    /// [image sensor format]: https://en.wikipedia.org/wiki/Image_sensor_format
    ///
    /// [Super 35]: https://en.wikipedia.org/wiki/Super_35
    pub sensor_height: f32,
}

impl PhysicalCameraParameters {
    /// Calculate the [EV100](https://en.wikipedia.org/wiki/Exposure_value).
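    ///
    /// A quick sanity check against the default parameters (f/1.0, 1/125 s
    /// shutter, ISO 100); the expected value is worked out by hand from the
    /// formula below:
    ///
    /// ```
    /// # use bevy_camera::PhysicalCameraParameters;
    /// let ev100 = PhysicalCameraParameters::default().ev100();
    /// // log2(1.0 * 1.0 * 100.0 / ((1.0 / 125.0) * 100.0)) = log2(125.0) ≈ 6.966
    /// assert!((ev100 - 6.966).abs() < 1e-3);
    /// ```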
    pub fn ev100(&self) -> f32 {
        ops::log2(
            self.aperture_f_stops * self.aperture_f_stops * 100.0
                / (self.shutter_speed_s * self.sensitivity_iso),
        )
    }
}

impl Default for PhysicalCameraParameters {
    fn default() -> Self {
        Self {
            aperture_f_stops: 1.0,
            shutter_speed_s: 1.0 / 125.0,
            sensitivity_iso: 100.0,
            sensor_height: 0.01866,
        }
    }
}

/// Error returned when a conversion between world-space and viewport-space coordinates fails.
///
/// See [`world_to_viewport`][Camera::world_to_viewport] and [`viewport_to_world`][Camera::viewport_to_world].
#[derive(Debug, Eq, PartialEq, Copy, Clone, Error)]
pub enum ViewportConversionError {
    /// The pre-computed size of the viewport was not available.
    ///
    /// This may be because the `Camera` was just created and `camera_system` has not been executed
    /// yet, or because the [`RenderTarget`] is misconfigured in one of the following ways:
    /// - it references the [`PrimaryWindow`](RenderTarget::Window) when there is none,
    /// - it references a [`Window`](RenderTarget::Window) entity that doesn't exist or doesn't actually have a `Window` component,
    /// - it references an [`Image`](RenderTarget::Image) that doesn't exist (invalid handle),
    /// - it references a [`TextureView`](RenderTarget::TextureView) that doesn't exist (invalid handle).
    #[error("pre-computed size of viewport not available")]
    NoViewportSize,
    /// The computed coordinate was beyond the `Camera`'s near plane.
    ///
    /// Only applicable when converting from world-space to viewport-space.
    #[error("computed coordinate beyond `Camera`'s near plane")]
    PastNearPlane,
    /// The computed coordinate was beyond the `Camera`'s far plane.
    ///
    /// Only applicable when converting from world-space to viewport-space.
    #[error("computed coordinate beyond `Camera`'s far plane")]
    PastFarPlane,
    /// The Normalized Device Coordinates could not be computed because the `camera_transform`, the
    /// `world_position`, or the projection matrix defined by [`Projection`](super::projection::Projection)
    /// contained `NAN` (see [`world_to_ndc`][Camera::world_to_ndc] and [`ndc_to_world`][Camera::ndc_to_world]).
    #[error("found NaN while computing NDC")]
    InvalidData,
}

/// The defining [`Component`] for camera entities,
/// storing information about how and what to render through this camera.
///
/// The [`Camera`] component is added to an entity to define the properties of the viewpoint from
/// which rendering occurs. It defines the position of the view to render, the projection method
/// to transform the 3D objects into a 2D image, as well as the render target into which that image
/// is produced.
///
/// Note that a [`Camera`] needs a `CameraRenderGraph` to render anything.
/// This is typically provided by adding a [`Camera2d`] or [`Camera3d`] component,
/// but custom render graphs can also be defined. Inserting a [`Camera`] with no render
/// graph will emit an error at runtime.
///
/// [`Camera2d`]: crate::Camera2d
/// [`Camera3d`]: crate::Camera3d
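///
/// # Example
///
/// A minimal sketch of spawning a second camera that draws on top of the
/// default one (a [`Camera2d`] setup is assumed):
///
/// ```no_run
/// # use bevy_ecs::prelude::Commands;
/// # use bevy_camera::{Camera, Camera2d};
/// fn setup(mut commands: Commands) {
///     commands.spawn((
///         Camera2d,
///         Camera {
///             // Cameras with a higher order render later, on top of lower orders.
///             order: 1,
///             ..Default::default()
///         },
///     ));
/// }
/// ```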
#[derive(Component, Debug, Reflect, Clone)]
#[reflect(Component, Default, Debug, Clone)]
#[require(
    Frustum,
    CameraMainTextureUsages,
    VisibleEntities,
    Transform,
    Visibility,
    RenderTarget
)]
pub struct Camera {
    /// If set, this camera will render to the given [`Viewport`] rectangle within the configured [`RenderTarget`].
    pub viewport: Option<Viewport>,
    /// Cameras with a higher order are rendered later, and thus on top of lower order cameras.
    pub order: isize,
    /// If this is set to `true`, this camera will be rendered to its specified [`RenderTarget`]. If `false`, this
    /// camera will not be rendered.
    pub is_active: bool,
    /// Computed values for this camera, such as the projection matrix and the render target size.
    #[reflect(ignore, clone)]
    pub computed: ComputedCameraValues,
    // todo: reflect this when #6042 lands
    /// The [`CameraOutputMode`] for this camera.
    pub output_mode: CameraOutputMode,
    /// Controls when MSAA writeback occurs for this camera.
    /// See [`MsaaWriteback`] for available options.
    pub msaa_writeback: MsaaWriteback,
    /// The clear color operation to perform on the render target.
    pub clear_color: ClearColorConfig,
    /// Whether to switch culling mode so that materials that request backface
    /// culling cull front faces, and vice versa.
    ///
    /// This is typically used for cameras that mirror the world that they
    /// render across a plane, because doing that flips the winding of each
    /// polygon.
    ///
    /// This setting doesn't affect materials that disable backface culling.
    pub invert_culling: bool,
    /// If set, this camera will be a sub camera of a large view, defined by a [`SubCameraView`].
    pub sub_camera_view: Option<SubCameraView>,
}

impl Default for Camera {
    fn default() -> Self {
        Self {
            is_active: true,
            order: 0,
            viewport: None,
            computed: Default::default(),
            output_mode: Default::default(),
            msaa_writeback: MsaaWriteback::default(),
            clear_color: Default::default(),
            invert_culling: false,
            sub_camera_view: None,
        }
    }
}

impl Camera {
    /// Converts a physical size in this `Camera` to a logical size.
    #[inline]
    pub fn to_logical(&self, physical_size: UVec2) -> Option<Vec2> {
        let scale = self.computed.target_info.as_ref()?.scale_factor;
        Some(physical_size.as_vec2() / scale)
    }

    /// The rendered physical bounds [`URect`] of the camera. If the `viewport` field is
    /// set to [`Some`], this will be the rect of that custom viewport. Otherwise it will default to
    /// the full physical rect of the current [`RenderTarget`].
    #[inline]
    pub fn physical_viewport_rect(&self) -> Option<URect> {
        let min = self
            .viewport
            .as_ref()
            .map(|v| v.physical_position)
            .unwrap_or(UVec2::ZERO);
        let max = min + self.physical_viewport_size()?;
        Some(URect { min, max })
    }

    /// The rendered logical bounds [`Rect`] of the camera. If the `viewport` field is set to
    /// [`Some`], this will be the rect of that custom viewport. Otherwise it will default to the
    /// full logical rect of the current [`RenderTarget`].
    #[inline]
    pub fn logical_viewport_rect(&self) -> Option<Rect> {
        let URect { min, max } = self.physical_viewport_rect()?;
        Some(Rect {
            min: self.to_logical(min)?,
            max: self.to_logical(max)?,
        })
    }

    /// The logical size of this camera's viewport. If the `viewport` field is set to [`Some`], this
    /// will be the size of that custom viewport. Otherwise it will default to the full logical size
    /// of the current [`RenderTarget`].
    /// For logic that requires the full logical size of the
    /// [`RenderTarget`], prefer [`Camera::logical_target_size`].
    ///
    /// Returns `None` if either:
    /// - the function is called just after the `Camera` is created, before `camera_system` is executed,
    /// - the [`RenderTarget`] isn't correctly set:
    ///   - it references the [`PrimaryWindow`](RenderTarget::Window) when there is none,
    ///   - it references a [`Window`](RenderTarget::Window) entity that doesn't exist or doesn't actually have a `Window` component,
    ///   - it references an [`Image`](RenderTarget::Image) that doesn't exist (invalid handle),
    ///   - it references a [`TextureView`](RenderTarget::TextureView) that doesn't exist (invalid handle).
    #[inline]
    pub fn logical_viewport_size(&self) -> Option<Vec2> {
        self.viewport
            .as_ref()
            .and_then(|v| self.to_logical(v.physical_size))
            .or_else(|| self.logical_target_size())
    }

    /// The physical size of this camera's viewport (in physical pixels).
    /// If the `viewport` field is set to [`Some`], this
    /// will be the size of that custom viewport. Otherwise it will default to the full physical size of
    /// the current [`RenderTarget`].
    /// For logic that requires the full physical size of the [`RenderTarget`], prefer [`Camera::physical_target_size`].
    #[inline]
    pub fn physical_viewport_size(&self) -> Option<UVec2> {
        self.viewport
            .as_ref()
            .map(|v| v.physical_size)
            .or_else(|| self.physical_target_size())
    }

    /// The full logical size of this camera's [`RenderTarget`], ignoring custom `viewport` configuration.
    /// Note that if the `viewport` field is [`Some`], this will not represent the size of the rendered area.
    /// For logic that requires the size of the actually rendered area, prefer [`Camera::logical_viewport_size`].
    #[inline]
    pub fn logical_target_size(&self) -> Option<Vec2> {
        self.computed
            .target_info
            .as_ref()
            .and_then(|t| self.to_logical(t.physical_size))
    }

    /// The full physical size of this camera's [`RenderTarget`] (in physical pixels),
    /// ignoring custom `viewport` configuration.
    /// Note that if the `viewport` field is [`Some`], this will not represent the size of the rendered area.
    /// For logic that requires the size of the actually rendered area, prefer [`Camera::physical_viewport_size`].
    #[inline]
    pub fn physical_target_size(&self) -> Option<UVec2> {
        self.computed.target_info.as_ref().map(|t| t.physical_size)
    }

    /// The scale factor of this camera's [`RenderTarget`], if it has been computed.
    #[inline]
    pub fn target_scaling_factor(&self) -> Option<f32> {
        self.computed
            .target_info
            .as_ref()
            .map(|t: &RenderTargetInfo| t.scale_factor)
    }

    /// The projection matrix computed using this camera's [`Projection`](super::projection::Projection).
    #[inline]
    pub fn clip_from_view(&self) -> Mat4 {
        self.computed.clip_from_view
    }

    /// Core conversion logic to compute viewport coordinates.
    ///
    /// This function is shared by `world_to_viewport` and `world_to_viewport_with_depth`
    /// to avoid code duplication.
    ///
    /// Returns a tuple `(viewport_position, depth)`.
    fn world_to_viewport_core(
        &self,
        camera_transform: &GlobalTransform,
        world_position: Vec3,
    ) -> Result<(Vec2, f32), ViewportConversionError> {
        let target_rect = self
            .logical_viewport_rect()
            .ok_or(ViewportConversionError::NoViewportSize)?;
        let mut ndc_space_coords = self
            .world_to_ndc(camera_transform, world_position)
            .ok_or(ViewportConversionError::InvalidData)?;
        // NDC z-values outside of 0 < z < 1 are outside the (implicit) camera frustum and are thus not in viewport-space
        if ndc_space_coords.z < 0.0 {
            return Err(ViewportConversionError::PastFarPlane);
        }
        if ndc_space_coords.z > 1.0 {
            return Err(ViewportConversionError::PastNearPlane);
        }

        let depth = ndc_space_coords.z;

        // Flip the Y co-ordinate origin from the bottom to the top.
        ndc_space_coords.y = -ndc_space_coords.y;

        // Once in NDC space, we can discard the z element and map x/y to the viewport rect
        let viewport_position =
            (ndc_space_coords.truncate() + Vec2::ONE) / 2.0 * target_rect.size() + target_rect.min;
        Ok((viewport_position, depth))
    }

    /// Given a position in world space, use the camera to compute the viewport-space coordinates.
    ///
    /// To get the coordinates in Normalized Device Coordinates, you should use
    /// [`world_to_ndc`](Self::world_to_ndc).
    ///
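    /// # Example
    ///
    /// A sketch of projecting a world-space point into viewport coordinates;
    /// the system shape mirrors the `viewport_to_world` example below:
    ///
    /// ```no_run
    /// # use bevy_ecs::prelude::Single;
    /// # use bevy_transform::prelude::GlobalTransform;
    /// # use bevy_camera::Camera;
    /// # use bevy_math::Vec3;
    /// fn system(camera_query: Single<(&Camera, &GlobalTransform)>) {
    ///     let (camera, camera_transform) = *camera_query;
    ///     // Project the world origin onto the viewport.
    ///     if let Ok(viewport_position) = camera.world_to_viewport(camera_transform, Vec3::ZERO) {
    ///         println!("{viewport_position:?}");
    ///     }
    /// }
    /// ```
    ///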
    /// # Panics
    ///
    /// Will panic if `glam_assert` is enabled and the `camera_transform` contains `NAN`
    /// (see [`world_to_ndc`][Self::world_to_ndc]).
    #[doc(alias = "world_to_screen")]
    pub fn world_to_viewport(
        &self,
        camera_transform: &GlobalTransform,
        world_position: Vec3,
    ) -> Result<Vec2, ViewportConversionError> {
        Ok(self
            .world_to_viewport_core(camera_transform, world_position)?
            .0)
    }

    /// Given a position in world space, use the camera to compute the viewport-space coordinates and depth.
    ///
    /// To get the coordinates in Normalized Device Coordinates, you should use
    /// [`world_to_ndc`](Self::world_to_ndc).
    ///
    /// # Panics
    ///
    /// Will panic if `glam_assert` is enabled and the `camera_transform` contains `NAN`
    /// (see [`world_to_ndc`][Self::world_to_ndc]).
    #[doc(alias = "world_to_screen_with_depth")]
    pub fn world_to_viewport_with_depth(
        &self,
        camera_transform: &GlobalTransform,
        world_position: Vec3,
    ) -> Result<Vec3, ViewportConversionError> {
        let result = self.world_to_viewport_core(camera_transform, world_position)?;
        // Convert the NDC depth to view-space z via the near plane, negating so the
        // returned depth is a positive distance in front of the camera.
        let depth = -self.depth_ndc_to_view_z(result.1);
        Ok(result.0.extend(depth))
    }

    /// Returns a ray originating from the camera, that passes through everything beyond `viewport_position`.
    ///
    /// The resulting ray starts on the near plane of the camera.
    ///
    /// If the camera's projection is orthographic the direction of the ray is always equal to `camera_transform.forward()`.
    ///
    /// To get the world space coordinates with Normalized Device Coordinates, you should use
    /// [`ndc_to_world`](Self::ndc_to_world).
    ///
    /// # Example
    /// ```no_run
    /// # use bevy_window::Window;
    /// # use bevy_ecs::prelude::{Single, IntoScheduleConfigs};
    /// # use bevy_transform::prelude::{GlobalTransform, TransformSystems};
    /// # use bevy_camera::Camera;
    /// # use bevy_app::{App, PostUpdate};
    /// #
    /// fn system(camera_query: Single<(&Camera, &GlobalTransform)>, window: Single<&Window>) {
    ///     let (camera, camera_transform) = *camera_query;
    ///
    ///     if let Some(cursor_position) = window.cursor_position()
    ///         // Calculate a ray pointing from the camera into the world based on the cursor's position.
    ///         && let Ok(ray) = camera.viewport_to_world(camera_transform, cursor_position)
    ///     {
    ///         println!("{ray:?}");
    ///     }
    /// }
    ///
    /// # let mut app = App::new();
    /// // Run the system after transform propagation so the camera's global transform is up-to-date.
    /// app.add_systems(PostUpdate, system.after(TransformSystems::Propagate));
    /// ```
    ///
    /// # Panics
    ///
    /// Will panic if the camera's projection matrix is invalid (has a determinant of 0) and
    /// `glam_assert` is enabled (see [`ndc_to_world`](Self::ndc_to_world)).
    pub fn viewport_to_world(
        &self,
        camera_transform: &GlobalTransform,
        viewport_position: Vec2,
    ) -> Result<Ray3d, ViewportConversionError> {
        let ndc_xy = self.viewport_to_ndc(viewport_position)?;

        let ndc_point_near = ndc_xy.extend(1.0).into();
        // Using EPSILON because an ndc with Z = 0 returns NaNs.
        let ndc_point_far = ndc_xy.extend(f32::EPSILON).into();
        let view_from_clip = self.computed.clip_from_view.inverse();
        let world_from_view = camera_transform.affine();
        // We multiply the point by `view_from_clip` and then `world_from_view` in sequence to avoid the precision loss
        // (and performance penalty) incurred by pre-composing an affine transform with a projective transform.
        // Additionally, we avoid adding and subtracting translation to the direction component to maintain precision.
        let view_point_near = view_from_clip.project_point3a(ndc_point_near);
        let view_point_far = view_from_clip.project_point3a(ndc_point_far);
        let view_dir = view_point_far - view_point_near;
        let origin = world_from_view.transform_point3a(view_point_near).into();
        let direction = world_from_view.transform_vector3a(view_dir).into();

        // The fallible direction constructor ensures that direction isn't NaN.
        Dir3::new(direction)
            .map_err(|_| ViewportConversionError::InvalidData)
            .map(|direction| Ray3d { origin, direction })
    }

    /// Returns a 2D world position computed from a position on this [`Camera`]'s viewport.
    ///
    /// Useful for 2D cameras and other cameras with an orthographic projection pointing along the Z axis.
    ///
    /// To get the world space coordinates with Normalized Device Coordinates, you should use
    /// [`ndc_to_world`](Self::ndc_to_world).
    ///
    /// # Example
    /// ```no_run
    /// # use bevy_window::Window;
    /// # use bevy_ecs::prelude::*;
    /// # use bevy_transform::prelude::{GlobalTransform, TransformSystems};
    /// # use bevy_camera::Camera;
    /// # use bevy_app::{App, PostUpdate};
    /// #
    /// fn system(camera_query: Single<(&Camera, &GlobalTransform)>, window: Single<&Window>) {
    ///     let (camera, camera_transform) = *camera_query;
    ///
    ///     if let Some(cursor_position) = window.cursor_position()
    ///         // Calculate a world position based on the cursor's position.
    ///         && let Ok(world_pos) = camera.viewport_to_world_2d(camera_transform, cursor_position)
    ///     {
    ///         println!("World position: {world_pos:.2}");
    ///     }
    /// }
    ///
    /// # let mut app = App::new();
    /// // Run the system after transform propagation so the camera's global transform is up-to-date.
    /// app.add_systems(PostUpdate, system.after(TransformSystems::Propagate));
    /// ```
    ///
    /// # Panics
    ///
    /// Will panic if the camera's projection matrix is invalid (has a determinant of 0) and
    /// `glam_assert` is enabled (see [`ndc_to_world`](Self::ndc_to_world)).
    pub fn viewport_to_world_2d(
        &self,
        camera_transform: &GlobalTransform,
        viewport_position: Vec2,
    ) -> Result<Vec2, ViewportConversionError> {
        let ndc = self.viewport_to_ndc(viewport_position)?;

        let world_near_plane = self
            .ndc_to_world(camera_transform, ndc.extend(1.))
            .ok_or(ViewportConversionError::InvalidData)?;

        Ok(world_near_plane.truncate())
    }

    /// Given a point in world space, use the camera's viewport to compute the Normalized Device Coordinates of the point.
    ///
    /// When the point is within the viewport the values returned will be between -1.0 (bottom left) and 1.0 (top right)
    /// on the X and Y axes, and between 0.0 (far) and 1.0 (near) on the Z axis.
    /// To get the coordinates in the render target's viewport dimensions, you should use
    /// [`world_to_viewport`](Self::world_to_viewport).
    ///
    /// Returns `None` if the `camera_transform`, the `world_position`, or the projection matrix defined by
    /// [`Projection`](super::projection::Projection) contain `NAN`.
    ///
    /// # Panics
    ///
    /// Will panic if the `camera_transform` contains `NAN` and the `glam_assert` feature is enabled.
    pub fn world_to_ndc<V: Into<Vec3A> + From<Vec3A>>(
        &self,
        camera_transform: &GlobalTransform,
        world_point: V,
    ) -> Option<V> {
        let view_from_world = camera_transform.affine().inverse();
        let view_point = view_from_world.transform_point3a(world_point.into());
        let ndc_point = self.computed.clip_from_view.project_point3a(view_point);

        (!ndc_point.is_nan()).then_some(ndc_point.into())
    }

    /// Given a position in Normalized Device Coordinates,
    /// use the camera's viewport to compute the world space position.
    ///
    /// The input is expected to be in NDC: `x` and `y` in the range `[-1.0, 1.0]`, and `z` in `[0.0, 1.0]`
    /// (with `z = 0.0` at the far plane and `z = 1.0` at the near plane).
    /// The returned value is a position in world space (your game's world units) and is not limited to `[-1.0, 1.0]`.
    /// To convert from a viewport position to world space, you should use
    /// [`viewport_to_world`](Self::viewport_to_world).
    ///
    /// Returns `None` if the `camera_transform`, the `ndc_point`, or the projection matrix defined by
    /// [`Projection`](super::projection::Projection) contain `NAN`.
    ///
    /// # Panics
    ///
    /// Will panic if the projection matrix is invalid (has a determinant of 0) and `glam_assert` is enabled.
    pub fn ndc_to_world<V: Into<Vec3A> + From<Vec3A>>(
        &self,
        camera_transform: &GlobalTransform,
        ndc_point: V,
    ) -> Option<V> {
        // We multiply the point by `view_from_clip` and then `world_from_view` in sequence to avoid the precision loss
        // (and performance penalty) incurred by pre-composing an affine transform with a projective transform.
        let view_point = self
            .computed
            .clip_from_view
            .inverse()
            .project_point3a(ndc_point.into());
        let world_point = camera_transform.affine().transform_point3a(view_point);

        (!world_point.is_nan()).then_some(world_point.into())
    }

    /// Converts the depth in Normalized Device Coordinates
    /// to linear view z for perspective projections.
    ///
    /// Note: Depth values in front of the camera will be negative as -z is forward
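    ///
    /// For example, with a near plane at `0.1`, an NDC depth of `1.0` (the near
    /// plane, since NDC z runs from 0.0 at the far plane to 1.0 at the near
    /// plane) maps to a view-space z of `-0.1`.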
    pub fn depth_ndc_to_view_z(&self, ndc_depth: f32) -> f32 {
        let near = self.clip_from_view().w_axis.z; // [3][2]
        -near / ndc_depth
    }

    /// Converts the depth in Normalized Device Coordinates
    /// to linear view z for orthographic projections.
    ///
    /// Note: Depth values in front of the camera will be negative as -z is forward
    pub fn depth_ndc_to_view_z_2d(&self, ndc_depth: f32) -> f32 {
        -(self.clip_from_view().w_axis.z - ndc_depth) / self.clip_from_view().z_axis.z
        //                      [3][2]                                       [2][2]
    }

    /// Converts a position in viewport coordinates to NDC.
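    ///
    /// For example, with a viewport covering the full target, the top-left
    /// corner `(0.0, 0.0)` maps to NDC `(-1.0, 1.0)` and the bottom-right
    /// corner maps to NDC `(1.0, -1.0)`.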
    pub fn viewport_to_ndc(
        &self,
        viewport_position: Vec2,
    ) -> Result<Vec2, ViewportConversionError> {
        let target_rect = self
            .logical_viewport_rect()
            .ok_or(ViewportConversionError::NoViewportSize)?;
        let rect_relative = (viewport_position - target_rect.min) / target_rect.size();
        let mut ndc = rect_relative * 2. - Vec2::ONE;
        // Flip the Y co-ordinate from the top to the bottom to enter NDC.
        ndc.y = -ndc.y;
        Ok(ndc)
    }
}

/// Control how this [`Camera`] outputs once rendering is completed.
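///
/// A sketch of an output mode that alpha-blends the camera's output over the
/// existing contents of the target instead of overwriting them:
///
/// ```
/// # use bevy_camera::{CameraOutputMode, ClearColorConfig};
/// # use wgpu_types::BlendState;
/// let output_mode = CameraOutputMode::Write {
///     blend_state: Some(BlendState::ALPHA_BLENDING),
///     clear_color: ClearColorConfig::None,
/// };
/// ```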
#[derive(Debug, Clone, Copy, Reflect)]
pub enum CameraOutputMode {
    /// Writes the camera output to the configured render target.
    Write {
        /// The blend state that will be used by the pipeline that writes the intermediate render textures to the final render target texture.
        /// If not set, the output will be written as-is, ignoring `clear_color` and the existing data in the final render target texture.
        blend_state: Option<BlendState>,
        /// The clear color operation to perform on the final render target texture.
        clear_color: ClearColorConfig,
    },
    /// Skips writing the camera output to the configured render target. The output will remain in the
    /// render target's "intermediate" textures, which a camera with a higher order should then write to the
    /// render target using [`CameraOutputMode::Write`]. The "skip" mode can easily prevent render results from
    /// being displayed, or cause them to be lost. Only use this if you know what you are doing!
    /// In camera setups with multiple active cameras rendering to the same [`RenderTarget`], the Skip mode can
    /// be used to remove unnecessary / redundant writes to the final output texture, removing unnecessary render passes.
    Skip,
}

impl Default for CameraOutputMode {
    fn default() -> Self {
        CameraOutputMode::Write {
            blend_state: None,
            clear_color: ClearColorConfig::Default,
        }
    }
}

/// The "target" that a [`Camera`] will render to. For example, this could be a `Window`
/// swapchain or an [`Image`].
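///
/// A sketch of rendering to an [`Image`] instead of a window (the handle is
/// assumed to come from `Assets<Image>`):
///
/// ```no_run
/// # use bevy_asset::Handle;
/// # use bevy_camera::RenderTarget;
/// # use bevy_image::Image;
/// fn image_target(image_handle: Handle<Image>) -> RenderTarget {
///     // The `From<Handle<Image>>` impl wraps the handle in an
///     // `ImageRenderTarget` with a scale factor of 1.0.
///     RenderTarget::from(image_handle)
/// }
/// ```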
#[derive(Component, Debug, Clone, Reflect, From)]
#[reflect(Clone, Component)]
pub enum RenderTarget {
    /// Window to which the camera's view is rendered.
    Window(WindowRef),
    /// Image to which the camera's view is rendered.
    Image(ImageRenderTarget),
    /// Texture View to which the camera's view is rendered.
    /// Useful when the texture view needs to be created outside of Bevy, for example OpenXR.
    TextureView(ManualTextureViewHandle),
    /// The camera won't render to any color target.
    ///
    /// This is useful when you want a camera that *only* renders prepasses, for
    /// example a depth prepass. See the `render_depth_to_texture` example.
    None {
        /// The physical size of the viewport.
        size: UVec2,
    },
}

impl RenderTarget {
    /// Get a handle to the render target's image,
    /// or `None` if the render target is another variant.
    pub fn as_image(&self) -> Option<&Handle<Image>> {
        if let Self::Image(image_target) = self {
            Some(&image_target.handle)
        } else {
            None
        }
    }

    /// Normalize the render target down to a more concrete value, mostly used for equality comparisons.
    pub fn normalize(&self, primary_window: Option<Entity>) -> Option<NormalizedRenderTarget> {
        match self {
            RenderTarget::Window(window_ref) => window_ref
                .normalize(primary_window)
                .map(NormalizedRenderTarget::Window),
            RenderTarget::Image(handle) => Some(NormalizedRenderTarget::Image(handle.clone())),
            RenderTarget::TextureView(id) => Some(NormalizedRenderTarget::TextureView(*id)),
            RenderTarget::None { size } => Some(NormalizedRenderTarget::None {
                width: size.x,
                height: size.y,
            }),
        }
    }
}

/// Normalized version of the render target.
///
/// Once we have this we shouldn't need to resolve it down anymore.
#[derive(Debug, Clone, Reflect, PartialEq, Eq, Hash, PartialOrd, Ord, From)]
#[reflect(Clone, PartialEq, Hash)]
pub enum NormalizedRenderTarget {
    /// Window to which the camera's view is rendered.
    Window(NormalizedWindowRef),
    /// Image to which the camera's view is rendered.
    Image(ImageRenderTarget),
    /// Texture View to which the camera's view is rendered.
    /// Useful when the texture view needs to be created outside of Bevy, for example OpenXR.
    TextureView(ManualTextureViewHandle),
    /// The camera won't render to any color target.
    ///
    /// This is useful when you want a camera that *only* renders prepasses, for
    /// example a depth prepass. See the `render_depth_to_texture` example.
    None {
        /// The physical width of the viewport.
        width: u32,
        /// The physical height of the viewport.
        height: u32,
    },
}

/// A unique id that corresponds to a specific `ManualTextureView` in the `ManualTextureViews` collection.
///
/// See `ManualTextureViews` in `bevy_camera` for more details.
#[derive(Default, Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord, Component, Reflect)]
#[reflect(Component, Default, Debug, PartialEq, Hash, Clone)]
pub struct ManualTextureViewHandle(pub u32);

/// A render target that renders to an [`Image`].
#[derive(Debug, Clone, Reflect)]
#[reflect(Clone, PartialEq, Hash)]
pub struct ImageRenderTarget {
    /// The image to render to.
    pub handle: Handle<Image>,
    /// The scale factor of the render target image, corresponding to the scale
    /// factor for a window target. This should almost always be 1.0.
    pub scale_factor: f32,
}

impl Eq for ImageRenderTarget {}

impl PartialEq for ImageRenderTarget {
    fn eq(&self, other: &Self) -> bool {
        self.handle == other.handle && FloatOrd(self.scale_factor) == FloatOrd(other.scale_factor)
    }
}

impl core::hash::Hash for ImageRenderTarget {
    fn hash<H: core::hash::Hasher>(&self, state: &mut H) {
        self.handle.hash(state);
        FloatOrd(self.scale_factor).hash(state);
    }
}

impl PartialOrd for ImageRenderTarget {
    fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for ImageRenderTarget {
    fn cmp(&self, other: &Self) -> core::cmp::Ordering {
        self.handle
            .cmp(&other.handle)
            .then_with(|| FloatOrd(self.scale_factor).cmp(&FloatOrd(other.scale_factor)))
    }
}

impl From<Handle<Image>> for RenderTarget {
    fn from(handle: Handle<Image>) -> Self {
        Self::Image(handle.into())
    }
}

impl From<Handle<Image>> for ImageRenderTarget {
    fn from(handle: Handle<Image>) -> Self {
        Self {
            handle,
            scale_factor: 1.0,
        }
    }
}

impl Default for RenderTarget {
    fn default() -> Self {
        Self::Window(Default::default())
    }
}

/// This component lets you control the [`TextureUsages`] field of the main texture generated for the camera.
#[derive(Component, Clone, Copy, Reflect)]
#[reflect(opaque)]
#[reflect(Component, Default, Clone)]
pub struct CameraMainTextureUsages(pub TextureUsages);

impl Default for CameraMainTextureUsages {
    fn default() -> Self {
        Self(
            TextureUsages::RENDER_ATTACHMENT
                | TextureUsages::TEXTURE_BINDING
                | TextureUsages::COPY_SRC,
        )
    }
}

impl CameraMainTextureUsages {
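    /// Returns `self` with the given additional [`TextureUsages`] flags enabled.
    ///
    /// A sketch of requesting an extra usage on top of the defaults:
    ///
    /// ```
    /// # use bevy_camera::CameraMainTextureUsages;
    /// # use wgpu_types::TextureUsages;
    /// let usages = CameraMainTextureUsages::default().with(TextureUsages::COPY_DST);
    /// assert!(usages.0.contains(TextureUsages::COPY_DST));
    /// ```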
    pub fn with(mut self, usages: TextureUsages) -> Self {
        self.0 |= usages;
        self
    }
}

#[cfg(test)]
mod test {
    use bevy_math::{Vec2, Vec3};
    use bevy_transform::components::GlobalTransform;

    use crate::{
        Camera, OrthographicProjection, PerspectiveProjection, Projection, RenderTargetInfo,
        Viewport,
    };

    fn make_camera(mut projection: Projection, physical_size: Vec2) -> Camera {
        let viewport = Viewport {
            physical_size: physical_size.as_uvec2(),
            ..Default::default()
        };
        let mut camera = Camera {
            viewport: Some(viewport.clone()),
            ..Default::default()
        };
        camera.computed.target_info = Some(RenderTargetInfo {
            physical_size: viewport.physical_size,
            scale_factor: 1.0,
        });
        projection.update(
            viewport.physical_size.x as f32,
            viewport.physical_size.y as f32,
        );
        camera.computed.clip_from_view = projection.get_clip_from_view();
        camera
    }

    #[test]
    fn viewport_to_world_orthographic_3d_returns_forward() {
        let transform = GlobalTransform::default();
        let size = Vec2::new(1600.0, 900.0);
        let camera = make_camera(
            Projection::Orthographic(OrthographicProjection::default_3d()),
            size,
        );
        let ray = camera.viewport_to_world(&transform, Vec2::ZERO).unwrap();
        assert_eq!(ray.direction, transform.forward());
        assert!(ray
            .origin
            .abs_diff_eq(Vec3::new(-size.x * 0.5, size.y * 0.5, 0.0), 1e-4));
        let ray = camera.viewport_to_world(&transform, size).unwrap();
        assert_eq!(ray.direction, transform.forward());
        assert!(ray
            .origin
            .abs_diff_eq(Vec3::new(size.x * 0.5, -size.y * 0.5, 0.0), 1e-4));
    }

    #[test]
    fn viewport_to_world_orthographic_2d_returns_forward() {
        let transform = GlobalTransform::default();
        let size = Vec2::new(1600.0, 900.0);
        let camera = make_camera(
            Projection::Orthographic(OrthographicProjection::default_2d()),
            size,
        );
        let ray = camera.viewport_to_world(&transform, Vec2::ZERO).unwrap();
        assert_eq!(ray.direction, transform.forward());
        assert!(ray
            .origin
            .abs_diff_eq(Vec3::new(-size.x * 0.5, size.y * 0.5, 1000.0), 1e-4));
        let ray = camera.viewport_to_world(&transform, size).unwrap();
        assert_eq!(ray.direction, transform.forward());
        assert!(ray
            .origin
            .abs_diff_eq(Vec3::new(size.x * 0.5, -size.y * 0.5, 1000.0), 1e-4));
    }

    #[test]
    fn viewport_to_world_perspective_center_returns_forward() {
        let transform = GlobalTransform::default();
        let size = Vec2::new(1600.0, 900.0);
        let camera = make_camera(
            Projection::Perspective(PerspectiveProjection::default()),
            size,
        );
        let ray = camera.viewport_to_world(&transform, size * 0.5).unwrap();
        assert_eq!(ray.direction, transform.forward());
        assert_eq!(ray.origin, transform.forward() * 0.1);
    }
}
1059}