// trackball/image.rs

1use crate::{Frame, Scope};
2use nalgebra::{Isometry3, Matrix4, Point2, Point3, RealField, Vector2, Vector3, convert, zero};
3use simba::scalar::SubsetOf;
4
/// Image as projection of [`Scope`] wrt [`Frame`].
///
/// Caches the view, projection, and (inverse) transformation matrices derived from a
/// [`Frame`]/[`Scope`] pair so they are only recomputed when their inputs change.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct Image<N: Copy + RealField> {
	/// Current position in screen space of hovering input or pointing device.
	pos: Point2<N>,
	/// Maximum position in screen space as screen's width and height.
	max: Point2<N>,
	/// Cached unit per pixel on focus plane to scale/project positions/vectors onto focus plane.
	upp: N,
	/// Cached previous frame, compared against in `compute()` to detect view changes.
	frame: Frame<N>,
	/// Cached previous scope, compared against in `compute()` to detect projection changes.
	scope: Scope<N>,
	/// Cached view isometry from world to camera space coinciding with right-handed look-at space.
	view_iso: Isometry3<N>,
	/// Cached homogeneous view matrix computed from view isometry.
	view_mat: Matrix4<N>,
	/// Cached scale-identical orthographic or perspective projection matrix.
	proj_mat: Matrix4<N>,
	/// Cached transformation (projection matrix times view matrix).
	proj_view_mat: Matrix4<N>,
	/// Cached inverse of transformation.
	proj_view_inv: Matrix4<N>,
	/// Whether to compute transformation. Default is `true`.
	compute_mat: bool,
	/// Whether to compute inverse transformation. Default is `true`.
	compute_inv: bool,
	/// Whether to use passive transformations. Default is `false`.
	use_passive: bool,
}
36
37impl<N: Copy + RealField> Image<N> {
38	/// Computes initial transformations from frame, scope, and screen's width and height.
39	#[must_use]
40	pub fn new(frame: &Frame<N>, scope: &Scope<N>, max: Point2<N>) -> Self {
41		let mut image = Self {
42			pos: Point2::origin(),
43			max,
44			upp: zero(),
45			frame: *frame,
46			scope: *scope,
47			view_iso: Isometry3::identity(),
48			view_mat: zero(),
49			proj_mat: zero(),
50			proj_view_mat: zero(),
51			proj_view_inv: zero(),
52			compute_mat: true,
53			compute_inv: true,
54			use_passive: false,
55		};
56		if frame.distance() != N::zero() {
57			image.compute_view(frame);
58			image.compute_projection_and_upp(frame.distance(), scope);
59			image.compute_transformation();
60			image.compute_inverse_transformation();
61		}
62		image
63	}
64	/// Recomputes only cached matrices whose parameters have changed, see [`Self::set_compute()`].
65	///
66	/// Returns `Some(true)` on success, `Some(false)` on failure, and `None` with no changes.
67	#[allow(clippy::useless_let_if_seq)]
68	pub fn compute(&mut self, frame: Frame<N>, scope: Scope<N>) -> Option<bool> {
69		let mut compute = false;
70		if self.frame != frame {
71			self.compute_view(&frame);
72			compute = true;
73		}
74		if self.frame.distance() != frame.distance() || self.scope != scope {
75			self.compute_projection_and_upp(frame.distance(), &scope);
76			compute = true;
77		}
78		self.frame = frame;
79		self.scope = scope;
80		compute.then(|| {
81			if self.compute_mat || self.compute_inv {
82				self.compute_transformation();
83			}
84			if self.compute_inv {
85				self.compute_inverse_transformation()
86			} else {
87				true
88			}
89		})
90	}
91	/// Sets whether to compute transformation and inverse transformation with [`Self::compute()`].
92	///
93	/// Default is `(true, true)`.
94	pub const fn set_compute(&mut self, compute_mat: bool, compute_inv: bool) {
95		self.compute_mat = compute_mat;
96		self.compute_inv = compute_inv;
97	}
98	/// Sets whether to use passive transformations.
99	///
100	/// Default is `false`.
101	pub const fn set_passive(&mut self, use_passive: bool) {
102		self.use_passive = use_passive;
103	}
104	/// Current position in screen space of hovering input or pointing device.
105	#[must_use]
106	pub const fn pos(&self) -> &Point2<N> {
107		&self.pos
108	}
109	/// Sets current position in screen space of hovering input or pointing device.
110	pub const fn set_pos(&mut self, pos: Point2<N>) {
111		self.pos = pos;
112	}
113	/// Maximum position in screen space as screen's width and height.
114	#[must_use]
115	pub const fn max(&self) -> &Point2<N> {
116		&self.max
117	}
118	/// Sets maximum position in screen space as screen's width and height.
119	pub fn set_max(&mut self, max: Point2<N>) {
120		// Let `Self::compute()` recompute projection matrix by invalidating cached previous scope.
121		if self.max != max {
122			self.scope.set_fov(N::zero());
123		}
124		self.max = max;
125	}
126	/// Cached unit per pixel on focus plane to scale/project positions/vectors onto focus plane.
127	#[must_use]
128	pub const fn upp(&self) -> N {
129		self.upp
130	}
131	/// Cached view isometry.
132	#[must_use]
133	pub const fn view_isometry(&self) -> &Isometry3<N> {
134		&self.view_iso
135	}
136	/// Cached view matrix.
137	#[must_use]
138	pub const fn view(&self) -> &Matrix4<N> {
139		&self.view_mat
140	}
141	/// Computes view isometry and matrix from frame wrt camera eye and target.
142	pub fn compute_view(&mut self, frame: &Frame<N>) {
143		self.view_iso = if self.use_passive {
144			frame.inverse_view()
145		} else {
146			frame.view()
147		};
148		self.view_mat = self.view_iso.to_homogeneous();
149	}
150	/// Cached projection matrix.
151	#[must_use]
152	pub const fn projection(&self) -> &Matrix4<N> {
153		&self.proj_mat
154	}
155	/// Computes projection matrix and unit per pixel on focus plane.
156	pub fn compute_projection_and_upp(&mut self, zat: N, scope: &Scope<N>) {
157		let (mat, upp) = scope.projection_and_upp(zat, &self.max);
158		self.upp = upp;
159		self.proj_mat = mat;
160	}
161	/// Cached projection view matrix.
162	#[must_use]
163	pub const fn transformation(&self) -> &Matrix4<N> {
164		&self.proj_view_mat
165	}
166	/// Computes projection view matrix.
167	pub fn compute_transformation(&mut self) {
168		self.proj_view_mat = self.proj_mat * self.view_mat;
169	}
170	/// Cached inverse projection view matrix.
171	#[must_use]
172	pub const fn inverse_transformation(&self) -> &Matrix4<N> {
173		&self.proj_view_inv
174	}
175	/// Computes inverse of projection view matrix.
176	///
177	/// Returns `true` on success.
178	pub fn compute_inverse_transformation(&mut self) -> bool {
179		let inv = self.proj_view_mat.try_inverse();
180		if let Some(mat) = inv {
181			self.proj_view_inv = mat;
182		}
183		inv.is_some()
184	}
185	/// Clamps position in screen space wrt its maximum in screen space.
186	#[must_use]
187	pub fn clamp_pos_wrt_max(pos: &Point2<N>, max: &Point2<N>) -> Point2<N> {
188		Point2::new(pos.x.clamp(N::zero(), max.x), pos.y.clamp(N::zero(), max.y))
189	}
190	/// Clamps position in screen space.
191	#[must_use]
192	pub fn clamp_pos(&self, pos: &Point2<N>) -> Point2<N> {
193		Self::clamp_pos_wrt_max(pos, &self.max)
194	}
195	/// Transforms position and its maximum from screen to camera space wrt its maximum.
196	#[must_use]
197	pub fn transform_pos_and_max_wrt_max(
198		pos: &Point2<N>,
199		max: &Point2<N>,
200	) -> (Point2<N>, Point2<N>) {
201		let max = max * convert(0.5);
202		(Point2::new(pos.x - max.x, max.y - pos.y), max)
203	}
204	/// Transforms position from screen to camera space.
205	#[must_use]
206	pub fn transform_pos(&self, pos: &Point2<N>) -> Point2<N> {
207		Self::transform_pos_and_max_wrt_max(pos, &self.max).0
208	}
209	/// Transforms vector from screen to camera space.
210	#[must_use]
211	pub fn transform_vec(pos: &Vector2<N>) -> Vector2<N> {
212		Vector2::new(pos.x, -pos.y)
213	}
214	/// Transforms position from screen to camera space and projects it onto focus plane.
215	#[must_use]
216	pub fn project_pos(&self, pos: &Point2<N>) -> Point3<N> {
217		self.transform_pos(pos)
218			.coords
219			.scale(self.upp)
220			.push(N::zero())
221			.into()
222	}
223	/// Transforms vector from screen to camera space and projects it onto focus plane.
224	#[must_use]
225	pub fn project_vec(&self, vec: &Vector2<N>) -> Vector3<N> {
226		Self::transform_vec(vec).scale(self.upp).push(N::zero())
227	}
228	/// Casts components to another type, e.g., between [`f32`] and [`f64`].
229	#[must_use]
230	pub fn cast<M: Copy + RealField>(self) -> Image<M>
231	where
232		N: SubsetOf<M>,
233	{
234		Image {
235			pos: self.pos.cast(),
236			max: self.max.cast(),
237			upp: self.upp.to_superset(),
238			frame: self.frame.cast(),
239			scope: self.scope.cast(),
240			view_iso: self.view_iso.cast(),
241			view_mat: self.view_mat.cast(),
242			proj_mat: self.proj_mat.cast(),
243			proj_view_mat: self.proj_view_mat.cast(),
244			proj_view_inv: self.proj_view_inv.cast(),
245			compute_mat: self.compute_mat,
246			compute_inv: self.compute_inv,
247			use_passive: self.use_passive,
248		}
249	}
250}
251
#[cfg(feature = "rkyv")]
impl<N: Copy + RealField> rkyv::Archive for Image<N> {
	// Archived in place as-is: every field is plain `Copy` data, so no resolver state is needed.
	type Archived = Self;
	type Resolver = ();

	#[inline]
	#[allow(unsafe_code)]
	unsafe fn resolve(&self, _: usize, (): Self::Resolver, out: *mut Self::Archived) {
		// SAFETY: Per the `rkyv::Archive::resolve` contract, the caller guarantees `out`
		// points to suitably aligned memory valid for writing `Self::Archived`.
		unsafe {
			out.write(rkyv::to_archived!(*self as Self));
		}
	}
}
265
#[cfg(feature = "rkyv")]
impl<Ser: rkyv::Fallible + ?Sized, N: Copy + RealField> rkyv::Serialize<Ser> for Image<N> {
	// Serialization is a no-op since the archived form is the value itself (see `Archive` impl).
	#[inline]
	fn serialize(&self, _: &mut Ser) -> Result<Self::Resolver, Ser::Error> {
		Ok(())
	}
}
273
#[cfg(feature = "rkyv")]
impl<De: rkyv::Fallible + ?Sized, N: Copy + RealField> rkyv::Deserialize<Self, De> for Image<N> {
	// Deserialization is a bitwise copy out of the archive, mirroring the no-op serialization.
	#[inline]
	fn deserialize(&self, _: &mut De) -> Result<Self, De::Error> {
		Ok(rkyv::from_archived!(*self))
	}
}