#![expect(
    clippy::module_inception,
    reason = "The parent module contains all things viewport-related, while this module handles cameras as a component. However, a rename/refactor which should clear up this lint is being discussed; see #17196."
)]
use super::{ClearColorConfig, Projection};
use crate::{
    batching::gpu_preprocessing::{GpuPreprocessingMode, GpuPreprocessingSupport},
    camera::{CameraProjection, ManualTextureViewHandle, ManualTextureViews},
    primitives::Frustum,
    render_asset::RenderAssets,
    render_graph::{InternedRenderSubGraph, RenderSubGraph},
    render_resource::TextureView,
    sync_world::{RenderEntity, SyncToRenderWorld},
    texture::GpuImage,
    view::{
        ColorGrading, ExtractedView, ExtractedWindows, Msaa, NoIndirectDrawing, RenderLayers,
        RenderVisibleEntities, RetainedViewEntity, ViewUniformOffset, Visibility, VisibleEntities,
    },
    Extract,
};
use bevy_asset::{AssetEvent, AssetId, Assets, Handle};
use bevy_derive::{Deref, DerefMut};
use bevy_ecs::{
    change_detection::DetectChanges,
    component::{Component, HookContext},
    entity::{ContainsEntity, Entity},
    event::EventReader,
    prelude::With,
    query::Has,
    reflect::ReflectComponent,
    resource::Resource,
    system::{Commands, Query, Res, ResMut},
    world::DeferredWorld,
};
use bevy_image::Image;
use bevy_math::{ops, vec2, Dir3, FloatOrd, Mat4, Ray3d, Rect, URect, UVec2, UVec4, Vec2, Vec3};
use bevy_platform::collections::{HashMap, HashSet};
use bevy_reflect::prelude::*;
use bevy_render_macros::ExtractComponent;
use bevy_transform::components::{GlobalTransform, Transform};
use bevy_window::{
    NormalizedWindowRef, PrimaryWindow, Window, WindowCreated, WindowRef, WindowResized,
    WindowScaleFactorChanged,
};
use core::ops::Range;
use derive_more::derive::From;
use thiserror::Error;
use tracing::warn;
use wgpu::{BlendState, TextureFormat, TextureUsages};

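/// Render viewport configuration for the [`Camera`] component.
///
/// The viewport defines the area on the render target to which the camera renders its image.
/// `physical_position` and `physical_size` are expressed in physical pixels on that target.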
#[derive(Reflect, Debug, Clone)]
#[reflect(Default, Clone)]
pub struct Viewport {
    pub physical_position: UVec2,
    pub physical_size: UVec2,
    pub depth: Range<f32>,
}

impl Default for Viewport {
    fn default() -> Self {
        Self {
            physical_position: Default::default(),
            physical_size: UVec2::new(1, 1),
            depth: 0.0..1.0,
        }
    }
}

impl Viewport {
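    /// Shrinks this viewport (and moves its origin if necessary) so that it fits entirely
    /// within a render target of the given physical `size`.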
    pub fn clamp_to_size(&mut self, size: UVec2) {
        if self.physical_size.x + self.physical_position.x > size.x {
            if self.physical_position.x < size.x {
                self.physical_size.x = size.x - self.physical_position.x;
            } else if size.x > 0 {
                self.physical_position.x = size.x - 1;
                self.physical_size.x = 1;
            } else {
                self.physical_position.x = 0;
                self.physical_size.x = 0;
            }
        }
        if self.physical_size.y + self.physical_position.y > size.y {
            if self.physical_position.y < size.y {
                self.physical_size.y = size.y - self.physical_position.y;
            } else if size.y > 0 {
                self.physical_position.y = size.y - 1;
                self.physical_size.y = 1;
            } else {
                self.physical_position.y = 0;
                self.physical_size.y = 0;
            }
        }
    }
}

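/// Settings to define a camera sub view.
///
/// When set on a [`Camera`], only a `size`-sized rectangle at `offset` within an overall view of
/// `full_size` is projected onto the camera's render target, which allows one logical image to be
/// split across several cameras or targets.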
#[derive(Debug, Clone, Copy, Reflect, PartialEq)]
#[reflect(Clone, PartialEq, Default)]
pub struct SubCameraView {
    pub full_size: UVec2,
    pub offset: Vec2,
    pub size: UVec2,
}

impl Default for SubCameraView {
    fn default() -> Self {
        Self {
            full_size: UVec2::new(1, 1),
            offset: Vec2::new(0., 0.),
            size: UVec2::new(1, 1),
        }
    }
}

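/// Information about the current [`RenderTarget`] of a [`Camera`]: its physical size in pixels
/// and the scale factor used to convert between physical and logical sizes.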
#[derive(Default, Debug, Clone)]
pub struct RenderTargetInfo {
    pub physical_size: UVec2,
    pub scale_factor: f32,
}

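/// Values computed for a [`Camera`] by [`camera_system`]: the projection matrix, the current
/// render target info, and cached values used to detect changes between updates.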
#[derive(Default, Debug, Clone)]
pub struct ComputedCameraValues {
    clip_from_view: Mat4,
    target_info: Option<RenderTargetInfo>,
    old_viewport_size: Option<UVec2>,
    old_sub_camera_view: Option<SubCameraView>,
}

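/// How much light the [`Camera`] lets in, expressed as an EV100 exposure value.
///
/// Higher values produce a darker image and lower values a brighter one; see
/// [`Exposure::exposure`] for the multiplier derived from this value.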
#[derive(Component, Clone, Copy, Reflect)]
#[reflect(opaque)]
#[reflect(Component, Default, Clone)]
pub struct Exposure {
    pub ev100: f32,
}

impl Exposure {
    pub const SUNLIGHT: Self = Self {
        ev100: Self::EV100_SUNLIGHT,
    };
    pub const OVERCAST: Self = Self {
        ev100: Self::EV100_OVERCAST,
    };
    pub const INDOOR: Self = Self {
        ev100: Self::EV100_INDOOR,
    };
    pub const BLENDER: Self = Self {
        ev100: Self::EV100_BLENDER,
    };

    pub const EV100_SUNLIGHT: f32 = 15.0;
    pub const EV100_OVERCAST: f32 = 12.0;
    pub const EV100_INDOOR: f32 = 7.0;

    pub const EV100_BLENDER: f32 = 9.7;

    pub fn from_physical_camera(physical_camera_parameters: PhysicalCameraParameters) -> Self {
        Self {
            ev100: physical_camera_parameters.ev100(),
        }
    }

    #[inline]
    pub fn exposure(&self) -> f32 {
        ops::exp2(-self.ev100) / 1.2
    }
}

impl Default for Exposure {
    fn default() -> Self {
        Self::BLENDER
    }
}

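/// Parameters of a physical (real-world) camera: aperture, shutter speed, sensitivity, and sensor
/// height. An [`Exposure`] can be derived from these via [`PhysicalCameraParameters::ev100`].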
#[derive(Clone, Copy)]
pub struct PhysicalCameraParameters {
    pub aperture_f_stops: f32,
    pub shutter_speed_s: f32,
    pub sensitivity_iso: f32,
    pub sensor_height: f32,
}

impl PhysicalCameraParameters {
    pub fn ev100(&self) -> f32 {
        ops::log2(
            self.aperture_f_stops * self.aperture_f_stops * 100.0
                / (self.shutter_speed_s * self.sensitivity_iso),
        )
    }
}

impl Default for PhysicalCameraParameters {
    fn default() -> Self {
        Self {
            aperture_f_stops: 1.0,
            shutter_speed_s: 1.0 / 125.0,
            sensitivity_iso: 100.0,
            sensor_height: 0.01866,
        }
    }
}

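/// Errors that can occur while converting between world-space, NDC, and viewport coordinates
/// with the [`Camera`] conversion methods.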
#[derive(Debug, Eq, PartialEq, Copy, Clone, Error)]
pub enum ViewportConversionError {
    #[error("pre-computed size of viewport not available")]
    NoViewportSize,
    #[error("computed coordinate beyond `Camera`'s near plane")]
    PastNearPlane,
    #[error("computed coordinate beyond `Camera`'s far plane")]
    PastFarPlane,
    #[error("found NaN while computing NDC")]
    InvalidData,
}

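/// The defining [`Component`] for camera entities.
///
/// A camera determines what is rendered (from its [`GlobalTransform`] and projection), where the
/// result is written (its [`RenderTarget`]), and in which order it runs relative to other cameras
/// (its `order`).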
#[derive(Component, Debug, Reflect, Clone)]
#[reflect(Component, Default, Debug, Clone)]
#[component(on_add = warn_on_no_render_graph)]
#[require(
    Frustum,
    CameraMainTextureUsages,
    VisibleEntities,
    Transform,
    Visibility,
    Msaa,
    SyncToRenderWorld
)]
pub struct Camera {
    pub viewport: Option<Viewport>,
    pub order: isize,
    pub is_active: bool,
    #[reflect(ignore, clone)]
    pub computed: ComputedCameraValues,
    pub target: RenderTarget,
    pub hdr: bool,
    #[reflect(ignore, clone)]
    pub output_mode: CameraOutputMode,
    pub msaa_writeback: bool,
    pub clear_color: ClearColorConfig,
    pub sub_camera_view: Option<SubCameraView>,
}

fn warn_on_no_render_graph(world: DeferredWorld, HookContext { entity, caller, .. }: HookContext) {
    if !world.entity(entity).contains::<CameraRenderGraph>() {
        warn!("{}Entity {entity} has a `Camera` component, but it doesn't have a render graph configured. Consider adding a `Camera2d` or `Camera3d` component, or manually adding a `CameraRenderGraph` component if you need a custom render graph.", caller.map(|location|format!("{location}: ")).unwrap_or_default());
    }
}

impl Default for Camera {
    fn default() -> Self {
        Self {
            is_active: true,
            order: 0,
            viewport: None,
            computed: Default::default(),
            target: Default::default(),
            output_mode: Default::default(),
            hdr: false,
            msaa_writeback: true,
            clear_color: Default::default(),
            sub_camera_view: None,
        }
    }
}

impl Camera {
    #[inline]
    pub fn to_logical(&self, physical_size: UVec2) -> Option<Vec2> {
        let scale = self.computed.target_info.as_ref()?.scale_factor;
        Some(physical_size.as_vec2() / scale)
    }

    #[inline]
    pub fn physical_viewport_rect(&self) -> Option<URect> {
        let min = self
            .viewport
            .as_ref()
            .map(|v| v.physical_position)
            .unwrap_or(UVec2::ZERO);
        let max = min + self.physical_viewport_size()?;
        Some(URect { min, max })
    }

    #[inline]
    pub fn logical_viewport_rect(&self) -> Option<Rect> {
        let URect { min, max } = self.physical_viewport_rect()?;
        Some(Rect {
            min: self.to_logical(min)?,
            max: self.to_logical(max)?,
        })
    }

    #[inline]
    pub fn logical_viewport_size(&self) -> Option<Vec2> {
        self.viewport
            .as_ref()
            .and_then(|v| self.to_logical(v.physical_size))
            .or_else(|| self.logical_target_size())
    }

    #[inline]
    pub fn physical_viewport_size(&self) -> Option<UVec2> {
        self.viewport
            .as_ref()
            .map(|v| v.physical_size)
            .or_else(|| self.physical_target_size())
    }

    #[inline]
    pub fn logical_target_size(&self) -> Option<Vec2> {
        self.computed
            .target_info
            .as_ref()
            .and_then(|t| self.to_logical(t.physical_size))
    }

    #[inline]
    pub fn physical_target_size(&self) -> Option<UVec2> {
        self.computed.target_info.as_ref().map(|t| t.physical_size)
    }

    #[inline]
    pub fn target_scaling_factor(&self) -> Option<f32> {
        self.computed
            .target_info
            .as_ref()
            .map(|t: &RenderTargetInfo| t.scale_factor)
    }

    #[inline]
    pub fn clip_from_view(&self) -> Mat4 {
        self.computed.clip_from_view
    }

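    /// Computes the viewport position (in logical pixels, origin at the top left of the
    /// viewport) corresponding to the given world position.
    ///
    /// Returns an error if the position lies outside the camera's near/far planes, if the NDC
    /// computation produced NaN, or if no viewport size is available.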
    #[doc(alias = "world_to_screen")]
    pub fn world_to_viewport(
        &self,
        camera_transform: &GlobalTransform,
        world_position: Vec3,
    ) -> Result<Vec2, ViewportConversionError> {
        let target_rect = self
            .logical_viewport_rect()
            .ok_or(ViewportConversionError::NoViewportSize)?;
        let mut ndc_space_coords = self
            .world_to_ndc(camera_transform, world_position)
            .ok_or(ViewportConversionError::InvalidData)?;
        if ndc_space_coords.z < 0.0 {
            return Err(ViewportConversionError::PastNearPlane);
        }
        if ndc_space_coords.z > 1.0 {
            return Err(ViewportConversionError::PastFarPlane);
        }

        ndc_space_coords.y = -ndc_space_coords.y;

        let viewport_position =
            (ndc_space_coords.truncate() + Vec2::ONE) / 2.0 * target_rect.size() + target_rect.min;
        Ok(viewport_position)
    }

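    /// Like [`Camera::world_to_viewport`], but also returns the view-space depth of the world
    /// position in the `z` component of the result.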
    #[doc(alias = "world_to_screen_with_depth")]
    pub fn world_to_viewport_with_depth(
        &self,
        camera_transform: &GlobalTransform,
        world_position: Vec3,
    ) -> Result<Vec3, ViewportConversionError> {
        let target_rect = self
            .logical_viewport_rect()
            .ok_or(ViewportConversionError::NoViewportSize)?;
        let mut ndc_space_coords = self
            .world_to_ndc(camera_transform, world_position)
            .ok_or(ViewportConversionError::InvalidData)?;
        if ndc_space_coords.z < 0.0 {
            return Err(ViewportConversionError::PastNearPlane);
        }
        if ndc_space_coords.z > 1.0 {
            return Err(ViewportConversionError::PastFarPlane);
        }

        let depth = -self.depth_ndc_to_view_z(ndc_space_coords.z);

        ndc_space_coords.y = -ndc_space_coords.y;

        let viewport_position =
            (ndc_space_coords.truncate() + Vec2::ONE) / 2.0 * target_rect.size() + target_rect.min;
        Ok(viewport_position.extend(depth))
    }

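    /// Returns a [`Ray3d`] starting on the camera's near plane and passing through the given
    /// viewport position (in logical pixels, origin at the top left of the viewport).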
    pub fn viewport_to_world(
        &self,
        camera_transform: &GlobalTransform,
        viewport_position: Vec2,
    ) -> Result<Ray3d, ViewportConversionError> {
        let target_rect = self
            .logical_viewport_rect()
            .ok_or(ViewportConversionError::NoViewportSize)?;
        let mut rect_relative = (viewport_position - target_rect.min) / target_rect.size();
        rect_relative.y = 1.0 - rect_relative.y;

        let ndc = rect_relative * 2. - Vec2::ONE;
        let ndc_to_world =
            camera_transform.compute_matrix() * self.computed.clip_from_view.inverse();
        let world_near_plane = ndc_to_world.project_point3(ndc.extend(1.));
        let world_far_plane = ndc_to_world.project_point3(ndc.extend(f32::EPSILON));

        Dir3::new(world_far_plane - world_near_plane)
            .map_err(|_| ViewportConversionError::InvalidData)
            .map(|direction| Ray3d {
                origin: world_near_plane,
                direction,
            })
    }

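    /// Returns the world-space position on the camera's near plane that corresponds to the given
    /// viewport position, with the depth component dropped.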
    pub fn viewport_to_world_2d(
        &self,
        camera_transform: &GlobalTransform,
        viewport_position: Vec2,
    ) -> Result<Vec2, ViewportConversionError> {
        let target_rect = self
            .logical_viewport_rect()
            .ok_or(ViewportConversionError::NoViewportSize)?;
        let mut rect_relative = (viewport_position - target_rect.min) / target_rect.size();

        rect_relative.y = 1.0 - rect_relative.y;

        let ndc = rect_relative * 2. - Vec2::ONE;

        let world_near_plane = self
            .ndc_to_world(camera_transform, ndc.extend(1.))
            .ok_or(ViewportConversionError::InvalidData)?;

        Ok(world_near_plane.truncate())
    }

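    /// Transforms a world-space position into normalized device coordinates.
    /// Returns `None` if the projection produced NaN values.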
    pub fn world_to_ndc(
        &self,
        camera_transform: &GlobalTransform,
        world_position: Vec3,
    ) -> Option<Vec3> {
        let clip_from_world: Mat4 =
            self.computed.clip_from_view * camera_transform.compute_matrix().inverse();
        let ndc_space_coords: Vec3 = clip_from_world.project_point3(world_position);

        (!ndc_space_coords.is_nan()).then_some(ndc_space_coords)
    }

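    /// Transforms a position in normalized device coordinates back into world space;
    /// the inverse of [`Camera::world_to_ndc`]. Returns `None` if the result contains NaN.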
    pub fn ndc_to_world(&self, camera_transform: &GlobalTransform, ndc: Vec3) -> Option<Vec3> {
        let ndc_to_world =
            camera_transform.compute_matrix() * self.computed.clip_from_view.inverse();

        let world_space_coords = ndc_to_world.project_point3(ndc);

        (!world_space_coords.is_nan()).then_some(world_space_coords)
    }

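    /// Converts an NDC depth value into a view-space Z coordinate for a (reverse-Z) perspective
    /// projection. Positions in front of the camera come out negative, since -Z is forward in
    /// view space.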
    pub fn depth_ndc_to_view_z(&self, ndc_depth: f32) -> f32 {
        let near = self.clip_from_view().w_axis.z;
        -near / ndc_depth
    }

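    /// Converts an NDC depth value into a view-space Z coordinate for an orthographic projection,
    /// where depth is linear in view-space Z.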
    pub fn depth_ndc_to_view_z_2d(&self, ndc_depth: f32) -> f32 {
        -(self.clip_from_view().w_axis.z - ndc_depth) / self.clip_from_view().z_axis.z
    }
}

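/// Controls how a [`Camera`] writes its rendered result to the final render target:
/// either written with an optional [`BlendState`] and clear color, or skipped entirely.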
#[derive(Debug, Clone, Copy)]
pub enum CameraOutputMode {
    Write {
        blend_state: Option<BlendState>,
        clear_color: ClearColorConfig,
    },
    Skip,
}

impl Default for CameraOutputMode {
    fn default() -> Self {
        CameraOutputMode::Write {
            blend_state: None,
            clear_color: ClearColorConfig::Default,
        }
    }
}

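/// Configures which [`RenderSubGraph`] is used to render a [`Camera`].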
#[derive(Component, Debug, Deref, DerefMut, Reflect, Clone)]
#[reflect(opaque)]
#[reflect(Component, Debug, Clone)]
pub struct CameraRenderGraph(InternedRenderSubGraph);

impl CameraRenderGraph {
    #[inline]
    pub fn new<T: RenderSubGraph>(name: T) -> Self {
        Self(name.intern())
    }

    #[inline]
    pub fn set<T: RenderSubGraph>(&mut self, name: T) {
        self.0 = name.intern();
    }
}

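/// The "target" that a [`Camera`] renders its image to: a window, an [`Image`] asset, or a
/// texture view registered in [`ManualTextureViews`].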
#[derive(Debug, Clone, Reflect, From)]
#[reflect(Clone)]
pub enum RenderTarget {
    Window(WindowRef),
    Image(ImageRenderTarget),
    TextureView(ManualTextureViewHandle),
}

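/// An [`Image`] used as a [`RenderTarget`], together with the scale factor applied when
/// converting its physical size to a logical size.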
#[derive(Debug, Clone, Reflect, PartialEq, Eq, Hash, PartialOrd, Ord)]
#[reflect(Clone, PartialEq, Hash)]
pub struct ImageRenderTarget {
    pub handle: Handle<Image>,
    pub scale_factor: FloatOrd,
}

impl From<Handle<Image>> for RenderTarget {
    fn from(handle: Handle<Image>) -> Self {
        Self::Image(handle.into())
    }
}

impl From<Handle<Image>> for ImageRenderTarget {
    fn from(handle: Handle<Image>) -> Self {
        Self {
            handle,
            scale_factor: FloatOrd(1.0),
        }
    }
}

impl Default for RenderTarget {
    fn default() -> Self {
        Self::Window(Default::default())
    }
}

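/// A [`RenderTarget`] whose window reference has been resolved to a concrete window [`Entity`].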
#[derive(Debug, Clone, Reflect, PartialEq, Eq, Hash, PartialOrd, Ord, From)]
#[reflect(Clone, PartialEq, Hash)]
pub enum NormalizedRenderTarget {
    Window(NormalizedWindowRef),
    Image(ImageRenderTarget),
    TextureView(ManualTextureViewHandle),
}

impl RenderTarget {
    pub fn normalize(&self, primary_window: Option<Entity>) -> Option<NormalizedRenderTarget> {
        match self {
            RenderTarget::Window(window_ref) => window_ref
                .normalize(primary_window)
                .map(NormalizedRenderTarget::Window),
            RenderTarget::Image(handle) => Some(NormalizedRenderTarget::Image(handle.clone())),
            RenderTarget::TextureView(id) => Some(NormalizedRenderTarget::TextureView(*id)),
        }
    }

    pub fn as_image(&self) -> Option<&Handle<Image>> {
        if let Self::Image(image_target) = self {
            Some(&image_target.handle)
        } else {
            None
        }
    }
}

impl NormalizedRenderTarget {
    pub fn get_texture_view<'a>(
        &self,
        windows: &'a ExtractedWindows,
        images: &'a RenderAssets<GpuImage>,
        manual_texture_views: &'a ManualTextureViews,
    ) -> Option<&'a TextureView> {
        match self {
            NormalizedRenderTarget::Window(window_ref) => windows
                .get(&window_ref.entity())
                .and_then(|window| window.swap_chain_texture_view.as_ref()),
            NormalizedRenderTarget::Image(image_target) => images
                .get(&image_target.handle)
                .map(|image| &image.texture_view),
            NormalizedRenderTarget::TextureView(id) => {
                manual_texture_views.get(id).map(|tex| &tex.texture_view)
            }
        }
    }

    pub fn get_texture_format<'a>(
        &self,
        windows: &'a ExtractedWindows,
        images: &'a RenderAssets<GpuImage>,
        manual_texture_views: &'a ManualTextureViews,
    ) -> Option<TextureFormat> {
        match self {
            NormalizedRenderTarget::Window(window_ref) => windows
                .get(&window_ref.entity())
                .and_then(|window| window.swap_chain_texture_format),
            NormalizedRenderTarget::Image(image_target) => images
                .get(&image_target.handle)
                .map(|image| image.texture_format),
            NormalizedRenderTarget::TextureView(id) => {
                manual_texture_views.get(id).map(|tex| tex.format)
            }
        }
    }

    pub fn get_render_target_info<'a>(
        &self,
        resolutions: impl IntoIterator<Item = (Entity, &'a Window)>,
        images: &Assets<Image>,
        manual_texture_views: &ManualTextureViews,
    ) -> Option<RenderTargetInfo> {
        match self {
            NormalizedRenderTarget::Window(window_ref) => resolutions
                .into_iter()
                .find(|(entity, _)| *entity == window_ref.entity())
                .map(|(_, window)| RenderTargetInfo {
                    physical_size: window.physical_size(),
                    scale_factor: window.resolution.scale_factor(),
                }),
            NormalizedRenderTarget::Image(image_target) => {
                let image = images.get(&image_target.handle)?;
                Some(RenderTargetInfo {
                    physical_size: image.size(),
                    scale_factor: image_target.scale_factor.0,
                })
            }
            NormalizedRenderTarget::TextureView(id) => {
                manual_texture_views.get(id).map(|tex| RenderTargetInfo {
                    physical_size: tex.size,
                    scale_factor: 1.0,
                })
            }
        }
    }

    fn is_changed(
        &self,
        changed_window_ids: &HashSet<Entity>,
        changed_image_handles: &HashSet<&AssetId<Image>>,
    ) -> bool {
        match self {
            NormalizedRenderTarget::Window(window_ref) => {
                changed_window_ids.contains(&window_ref.entity())
            }
            NormalizedRenderTarget::Image(image_target) => {
                changed_image_handles.contains(&image_target.handle.id())
            }
            NormalizedRenderTarget::TextureView(_) => true,
        }
    }
}

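/// System that keeps [`Camera`] components up to date: when a window or image target is created,
/// resized, or rescaled, or when the camera's viewport, sub view, or projection changes, it
/// recomputes the cached [`RenderTargetInfo`] and the projection matrix.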
pub fn camera_system(
    mut window_resized_events: EventReader<WindowResized>,
    mut window_created_events: EventReader<WindowCreated>,
    mut window_scale_factor_changed_events: EventReader<WindowScaleFactorChanged>,
    mut image_asset_events: EventReader<AssetEvent<Image>>,
    primary_window: Query<Entity, With<PrimaryWindow>>,
    windows: Query<(Entity, &Window)>,
    images: Res<Assets<Image>>,
    manual_texture_views: Res<ManualTextureViews>,
    mut cameras: Query<(&mut Camera, &mut Projection)>,
) {
    let primary_window = primary_window.iter().next();

    let mut changed_window_ids = <HashSet<_>>::default();
    changed_window_ids.extend(window_created_events.read().map(|event| event.window));
    changed_window_ids.extend(window_resized_events.read().map(|event| event.window));
    let scale_factor_changed_window_ids: HashSet<_> = window_scale_factor_changed_events
        .read()
        .map(|event| event.window)
        .collect();
    changed_window_ids.extend(scale_factor_changed_window_ids.clone());

    let changed_image_handles: HashSet<&AssetId<Image>> = image_asset_events
        .read()
        .filter_map(|event| match event {
            AssetEvent::Modified { id } | AssetEvent::Added { id } => Some(id),
            _ => None,
        })
        .collect();

    for (mut camera, mut camera_projection) in &mut cameras {
        let mut viewport_size = camera
            .viewport
            .as_ref()
            .map(|viewport| viewport.physical_size);

        if let Some(normalized_target) = camera.target.normalize(primary_window) {
            if normalized_target.is_changed(&changed_window_ids, &changed_image_handles)
                || camera.is_added()
                || camera_projection.is_changed()
                || camera.computed.old_viewport_size != viewport_size
                || camera.computed.old_sub_camera_view != camera.sub_camera_view
            {
                let new_computed_target_info = normalized_target.get_render_target_info(
                    windows,
                    &images,
                    &manual_texture_views,
                );
                if normalized_target
                    .is_changed(&scale_factor_changed_window_ids, &HashSet::default())
                {
                    if let (Some(new_scale_factor), Some(old_scale_factor)) = (
                        new_computed_target_info
                            .as_ref()
                            .map(|info| info.scale_factor),
                        camera
                            .computed
                            .target_info
                            .as_ref()
                            .map(|info| info.scale_factor),
                    ) {
                        let resize_factor = new_scale_factor / old_scale_factor;
                        if let Some(ref mut viewport) = camera.viewport {
                            let resize = |vec: UVec2| (vec.as_vec2() * resize_factor).as_uvec2();
                            viewport.physical_position = resize(viewport.physical_position);
                            viewport.physical_size = resize(viewport.physical_size);
                            viewport_size = Some(viewport.physical_size);
                        }
                    }
                }
                if let Some(viewport) = &mut camera.viewport {
                    let target_info = &new_computed_target_info;
                    if let Some(target) = target_info {
                        viewport.clamp_to_size(target.physical_size);
                    }
                }
                camera.computed.target_info = new_computed_target_info;
                if let Some(size) = camera.logical_viewport_size() {
                    if size.x != 0.0 && size.y != 0.0 {
                        camera_projection.update(size.x, size.y);
                        camera.computed.clip_from_view = match &camera.sub_camera_view {
                            Some(sub_view) => {
                                camera_projection.get_clip_from_view_for_sub(sub_view)
                            }
                            None => camera_projection.get_clip_from_view(),
                        }
                    }
                }
            }
        }

        if camera.computed.old_viewport_size != viewport_size {
            camera.computed.old_viewport_size = viewport_size;
        }

        if camera.computed.old_sub_camera_view != camera.sub_camera_view {
            camera.computed.old_sub_camera_view = camera.sub_camera_view;
        }
    }
}

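/// The [`TextureUsages`] that the main textures of a [`Camera`] are created with.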
#[derive(Component, ExtractComponent, Clone, Copy, Reflect)]
#[reflect(opaque)]
#[reflect(Component, Default, Clone)]
pub struct CameraMainTextureUsages(pub TextureUsages);
impl Default for CameraMainTextureUsages {
    fn default() -> Self {
        Self(
            TextureUsages::RENDER_ATTACHMENT
                | TextureUsages::TEXTURE_BINDING
                | TextureUsages::COPY_SRC,
        )
    }
}

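/// The render-world counterpart of a [`Camera`], created by [`extract_cameras`] each frame for
/// every active camera.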
#[derive(Component, Debug)]
pub struct ExtractedCamera {
    pub target: Option<NormalizedRenderTarget>,
    pub physical_viewport_size: Option<UVec2>,
    pub physical_target_size: Option<UVec2>,
    pub viewport: Option<Viewport>,
    pub render_graph: InternedRenderSubGraph,
    pub order: isize,
    pub output_mode: CameraOutputMode,
    pub msaa_writeback: bool,
    pub clear_color: ClearColorConfig,
    pub sorted_camera_index_for_target: usize,
    pub exposure: f32,
    pub hdr: bool,
}

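/// Extracts all active cameras from the main world into the render world, inserting
/// [`ExtractedCamera`], [`ExtractedView`], and related components on their render entities.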
pub fn extract_cameras(
    mut commands: Commands,
    query: Extract<
        Query<(
            Entity,
            RenderEntity,
            &Camera,
            &CameraRenderGraph,
            &GlobalTransform,
            &VisibleEntities,
            &Frustum,
            Option<&ColorGrading>,
            Option<&Exposure>,
            Option<&TemporalJitter>,
            Option<&RenderLayers>,
            Option<&Projection>,
            Has<NoIndirectDrawing>,
        )>,
    >,
    primary_window: Extract<Query<Entity, With<PrimaryWindow>>>,
    gpu_preprocessing_support: Res<GpuPreprocessingSupport>,
    mapper: Extract<Query<&RenderEntity>>,
) {
    let primary_window = primary_window.iter().next();
    for (
        main_entity,
        render_entity,
        camera,
        camera_render_graph,
        transform,
        visible_entities,
        frustum,
        color_grading,
        exposure,
        temporal_jitter,
        render_layers,
        projection,
        no_indirect_drawing,
    ) in query.iter()
    {
        if !camera.is_active {
            commands.entity(render_entity).remove::<(
                ExtractedCamera,
                ExtractedView,
                RenderVisibleEntities,
                TemporalJitter,
                RenderLayers,
                Projection,
                NoIndirectDrawing,
                ViewUniformOffset,
            )>();
            continue;
        }

        let color_grading = color_grading.unwrap_or(&ColorGrading::default()).clone();

        if let (
            Some(URect {
                min: viewport_origin,
                ..
            }),
            Some(viewport_size),
            Some(target_size),
        ) = (
            camera.physical_viewport_rect(),
            camera.physical_viewport_size(),
            camera.physical_target_size(),
        ) {
            if target_size.x == 0 || target_size.y == 0 {
                continue;
            }

            let render_visible_entities = RenderVisibleEntities {
                entities: visible_entities
                    .entities
                    .iter()
                    .map(|(type_id, entities)| {
                        let entities = entities
                            .iter()
                            .map(|entity| {
                                let render_entity = mapper
                                    .get(*entity)
                                    .cloned()
                                    .map(|entity| entity.id())
                                    .unwrap_or(Entity::PLACEHOLDER);
                                (render_entity, (*entity).into())
                            })
                            .collect();
                        (*type_id, entities)
                    })
                    .collect(),
            };

            let mut commands = commands.entity(render_entity);
            commands.insert((
                ExtractedCamera {
                    target: camera.target.normalize(primary_window),
                    viewport: camera.viewport.clone(),
                    physical_viewport_size: Some(viewport_size),
                    physical_target_size: Some(target_size),
                    render_graph: camera_render_graph.0,
                    order: camera.order,
                    output_mode: camera.output_mode,
                    msaa_writeback: camera.msaa_writeback,
                    clear_color: camera.clear_color,
                    sorted_camera_index_for_target: 0,
                    exposure: exposure
                        .map(Exposure::exposure)
                        .unwrap_or_else(|| Exposure::default().exposure()),
                    hdr: camera.hdr,
                },
                ExtractedView {
                    retained_view_entity: RetainedViewEntity::new(main_entity.into(), None, 0),
                    clip_from_view: camera.clip_from_view(),
                    world_from_view: *transform,
                    clip_from_world: None,
                    hdr: camera.hdr,
                    viewport: UVec4::new(
                        viewport_origin.x,
                        viewport_origin.y,
                        viewport_size.x,
                        viewport_size.y,
                    ),
                    color_grading,
                },
                render_visible_entities,
                *frustum,
            ));

            if let Some(temporal_jitter) = temporal_jitter {
                commands.insert(temporal_jitter.clone());
            }

            if let Some(render_layers) = render_layers {
                commands.insert(render_layers.clone());
            }

            if let Some(perspective) = projection {
                commands.insert(perspective.clone());
            }

            if no_indirect_drawing
                || !matches!(
                    gpu_preprocessing_support.max_supported_mode,
                    GpuPreprocessingMode::Culling
                )
            {
                commands.insert(NoIndirectDrawing);
            }
        };
    }
}

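/// Cameras sorted by their order field, ascending; updated by [`sort_cameras`].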
#[derive(Resource, Default)]
pub struct SortedCameras(pub Vec<SortedCamera>);

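/// An entry in [`SortedCameras`]: the camera entity plus the keys it is sorted by.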
pub struct SortedCamera {
    pub entity: Entity,
    pub order: isize,
    pub target: Option<NormalizedRenderTarget>,
    pub hdr: bool,
}

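/// Sorts the extracted cameras by `order` and render target, assigns each camera its index among
/// cameras sharing the same target, and warns when two active cameras have the same order and
/// target, since that ambiguity makes render results unpredictable.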
pub fn sort_cameras(
    mut sorted_cameras: ResMut<SortedCameras>,
    mut cameras: Query<(Entity, &mut ExtractedCamera)>,
) {
    sorted_cameras.0.clear();
    for (entity, camera) in cameras.iter() {
        sorted_cameras.0.push(SortedCamera {
            entity,
            order: camera.order,
            target: camera.target.clone(),
            hdr: camera.hdr,
        });
    }
    sorted_cameras
        .0
        .sort_by(|c1, c2| (c1.order, &c1.target).cmp(&(c2.order, &c2.target)));
    let mut previous_order_target = None;
    let mut ambiguities = <HashSet<_>>::default();
    let mut target_counts = <HashMap<_, _>>::default();
    for sorted_camera in &mut sorted_cameras.0 {
        let new_order_target = (sorted_camera.order, sorted_camera.target.clone());
        if let Some(previous_order_target) = previous_order_target {
            if previous_order_target == new_order_target {
                ambiguities.insert(new_order_target.clone());
            }
        }
        if let Some(target) = &sorted_camera.target {
            let count = target_counts
                .entry((target.clone(), sorted_camera.hdr))
                .or_insert(0usize);
            let (_, mut camera) = cameras.get_mut(sorted_camera.entity).unwrap();
            camera.sorted_camera_index_for_target = *count;
            *count += 1;
        }
        previous_order_target = Some(new_order_target);
    }

    if !ambiguities.is_empty() {
        warn!(
            "Camera order ambiguities detected for active cameras with the following priorities: {:?}. \
            To fix this, ensure there is exactly one Camera entity spawned with a given order for a given RenderTarget. \
            Ambiguities should be resolved because either (1) multiple active cameras were spawned accidentally, which will \
            result in rendering multiple instances of the scene or (2) for cases where multiple active cameras is intentional, \
            ambiguities could result in unpredictable render results.",
            ambiguities
        );
    }
}

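/// A subpixel offset added to a camera's perspective projection, typically driven by temporal
/// anti-aliasing techniques to vary sample positions from frame to frame.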
#[derive(Component, Clone, Default, Reflect)]
#[reflect(Default, Component, Clone)]
pub struct TemporalJitter {
    pub offset: Vec2,
}

impl TemporalJitter {
    pub fn jitter_projection(&self, clip_from_view: &mut Mat4, view_size: Vec2) {
        if clip_from_view.w_axis.w == 1.0 {
            warn!(
                "TemporalJitter not supported with OrthographicProjection. Use PerspectiveProjection instead."
            );
            return;
        }

        let jitter = (self.offset * vec2(2.0, -2.0)) / view_size;

        clip_from_view.z_axis.x += jitter.x;
        clip_from_view.z_axis.y += jitter.y;
    }
}

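/// Camera component specifying a mip bias to apply when sampling from material textures,
/// often used together with temporal anti-aliasing to counteract texture blurring.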
#[derive(Default, Component, Reflect)]
#[reflect(Default, Component)]
pub struct MipBias(pub f32);