nalgebra/geometry/scale.rs
// Needed because otherwise the rkyv macros generate code incompatible with rust-2024
#![cfg_attr(feature = "rkyv-serialize", allow(unsafe_op_in_unsafe_fn))]

use approx::{AbsDiffEq, RelativeEq, UlpsEq};
use num::{One, Zero};
use std::fmt;
use std::hash;

#[cfg(feature = "serde-serialize-no-std")]
use serde::{Deserialize, Deserializer, Serialize, Serializer};

use crate::ClosedDivAssign;
use crate::ClosedMulAssign;
use crate::base::allocator::Allocator;
use crate::base::dimension::{DimNameAdd, DimNameSum, U1};
use crate::base::storage::Owned;
use crate::base::{Const, DefaultAllocator, OMatrix, OVector, SVector, Scalar};

use crate::geometry::Point;

#[cfg(feature = "rkyv-serialize")]
use rkyv::bytecheck;

/// A scale which supports non-uniform scaling.
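///
/// # Example
/// ```
/// # use nalgebra::{Scale3, Point3};
/// // Illustrative sketch: each component of the scale multiplies the matching
/// // coordinate of a point (see also `transform_point` below).
/// let s = Scale3::new(1.0, 2.0, 3.0);
/// assert_eq!(s * Point3::new(1.0, 1.0, 1.0), Point3::new(1.0, 2.0, 3.0));
/// ```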
#[repr(C)]
#[cfg_attr(
    feature = "rkyv-serialize-no-std",
    derive(rkyv::Archive, rkyv::Serialize, rkyv::Deserialize),
    archive(
        as = "Scale<T::Archived, D>",
        bound(archive = "
        T: rkyv::Archive,
        SVector<T, D>: rkyv::Archive<Archived = SVector<T::Archived, D>>
    ")
    )
)]
#[cfg_attr(feature = "rkyv-serialize", derive(bytecheck::CheckBytes))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[derive(Copy, Clone)]
pub struct Scale<T, const D: usize> {
    /// The scale coordinates, i.e., the factors by which each of a point's coordinates is
    /// multiplied when it is scaled.
    pub vector: SVector<T, D>,
}

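/// Debug-formats a scale as the slice of its components, e.g. `[1.0, 2.0]`
/// (illustrative doctest; the behavior follows from the `as_slice()` call below).
///
/// ```
/// # use nalgebra::Scale2;
/// assert_eq!(format!("{:?}", Scale2::new(1.0, 2.0)), "[1.0, 2.0]");
/// ```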
impl<T: fmt::Debug, const D: usize> fmt::Debug for Scale<T, D> {
    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
        self.vector.as_slice().fmt(formatter)
    }
}

impl<T: Scalar + hash::Hash, const D: usize> hash::Hash for Scale<T, D>
where
    Owned<T, Const<D>>: hash::Hash,
{
    fn hash<H: hash::Hasher>(&self, state: &mut H) {
        self.vector.hash(state)
    }
}

#[cfg(feature = "bytemuck")]
unsafe impl<T, const D: usize> bytemuck::Zeroable for Scale<T, D>
where
    T: Scalar + bytemuck::Zeroable,
    SVector<T, D>: bytemuck::Zeroable,
{
}

#[cfg(feature = "bytemuck")]
unsafe impl<T, const D: usize> bytemuck::Pod for Scale<T, D>
where
    T: Scalar + bytemuck::Pod,
    SVector<T, D>: bytemuck::Pod,
{
}

#[cfg(feature = "serde-serialize-no-std")]
impl<T: Scalar, const D: usize> Serialize for Scale<T, D>
where
    Owned<T, Const<D>>: Serialize,
{
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        self.vector.serialize(serializer)
    }
}

#[cfg(feature = "serde-serialize-no-std")]
impl<'a, T: Scalar, const D: usize> Deserialize<'a> for Scale<T, D>
where
    Owned<T, Const<D>>: Deserialize<'a>,
{
    fn deserialize<Des>(deserializer: Des) -> Result<Self, Des::Error>
    where
        Des: Deserializer<'a>,
    {
        let matrix = SVector::<T, D>::deserialize(deserializer)?;

        Ok(Scale::from(matrix))
    }
}

impl<T: Scalar, const D: usize> Scale<T, D> {
    /// Attempts to invert `self`, returning `None` if any scale component is zero.
    ///
    /// # Example
    /// ```
    /// # use nalgebra::{Scale2, Scale3};
    /// let t = Scale3::new(1.0, 2.0, 3.0);
    /// assert_eq!(t * t.try_inverse().unwrap(), Scale3::identity());
    /// assert_eq!(t.try_inverse().unwrap() * t, Scale3::identity());
    ///
    /// // Works in all dimensions.
    /// let t = Scale2::new(1.0, 2.0);
    /// assert_eq!(t * t.try_inverse().unwrap(), Scale2::identity());
    /// assert_eq!(t.try_inverse().unwrap() * t, Scale2::identity());
    ///
    /// // Returns `None` if any coordinate is 0.
    /// let t = Scale2::new(0.0, 2.0);
    /// assert_eq!(t.try_inverse(), None);
    /// ```
    #[inline]
    #[must_use = "Did you mean to use try_inverse_mut()?"]
    pub fn try_inverse(&self) -> Option<Scale<T, D>>
    where
        T: ClosedDivAssign + One + Zero,
    {
        for i in 0..D {
            if self.vector[i] == T::zero() {
                return None;
            }
        }
        Some(self.vector.map(|e| T::one() / e).into())
    }

    /// Inverts `self` without checking whether any scale component is zero.
    ///
    /// # Example
    /// ```
    /// # use nalgebra::{Scale2, Scale3};
    ///
    /// unsafe {
    ///     let t = Scale3::new(1.0, 2.0, 3.0);
    ///     assert_eq!(t * t.inverse_unchecked(), Scale3::identity());
    ///     assert_eq!(t.inverse_unchecked() * t, Scale3::identity());
    ///
    ///     // Works in all dimensions.
    ///     let t = Scale2::new(1.0, 2.0);
    ///     assert_eq!(t * t.inverse_unchecked(), Scale2::identity());
    ///     assert_eq!(t.inverse_unchecked() * t, Scale2::identity());
    /// }
    /// ```
    ///
    /// # Safety
    ///
    /// Should only be used if all scale components are known to be non-zero.
    #[inline]
    #[must_use]
    pub unsafe fn inverse_unchecked(&self) -> Scale<T, D>
    where
        T: ClosedDivAssign + One,
    {
        self.vector.map(|e| T::one() / e).into()
    }

    /// Inverts `self` component-wise, leaving zero components unchanged.
    ///
    /// # Example
    /// ```
    /// # use nalgebra::{Scale2, Scale3};
    /// let t = Scale3::new(1.0, 2.0, 3.0);
    /// assert_eq!(t * t.pseudo_inverse(), Scale3::identity());
    /// assert_eq!(t.pseudo_inverse() * t, Scale3::identity());
    ///
    /// // Works in all dimensions.
    /// let t = Scale2::new(1.0, 2.0);
    /// assert_eq!(t * t.pseudo_inverse(), Scale2::identity());
    /// assert_eq!(t.pseudo_inverse() * t, Scale2::identity());
    ///
    /// // Inverts only non-zero coordinates.
    /// let t = Scale2::new(0.0, 2.0);
    /// assert_eq!(t * t.pseudo_inverse(), Scale2::new(0.0, 1.0));
    /// assert_eq!(t.pseudo_inverse() * t, Scale2::new(0.0, 1.0));
    /// ```
    #[inline]
    #[must_use]
    pub fn pseudo_inverse(&self) -> Scale<T, D>
    where
        T: ClosedDivAssign + One + Zero,
    {
        self.vector
            .map(|e| {
                if e != T::zero() {
                    T::one() / e
                } else {
                    T::zero()
                }
            })
            .into()
    }

    /// Converts this Scale into its equivalent homogeneous transformation matrix.
    ///
    /// # Example
    /// ```
    /// # use nalgebra::{Scale2, Scale3, Matrix3, Matrix4};
    /// let t = Scale3::new(10.0, 20.0, 30.0);
    /// let expected = Matrix4::new(10.0, 0.0, 0.0, 0.0,
    ///                             0.0, 20.0, 0.0, 0.0,
    ///                             0.0, 0.0, 30.0, 0.0,
    ///                             0.0, 0.0, 0.0, 1.0);
    /// assert_eq!(t.to_homogeneous(), expected);
    ///
    /// let t = Scale2::new(10.0, 20.0);
    /// let expected = Matrix3::new(10.0, 0.0, 0.0,
    ///                             0.0, 20.0, 0.0,
    ///                             0.0, 0.0, 1.0);
    /// assert_eq!(t.to_homogeneous(), expected);
    /// ```
    #[inline]
    #[must_use]
    pub fn to_homogeneous(&self) -> OMatrix<T, DimNameSum<Const<D>, U1>, DimNameSum<Const<D>, U1>>
    where
        T: Zero + One + Clone,
        Const<D>: DimNameAdd<U1>,
        DefaultAllocator: Allocator<DimNameSum<Const<D>, U1>, DimNameSum<Const<D>, U1>>
            + Allocator<DimNameSum<Const<D>, U1>, U1>,
    {
        // TODO: use self.vector.push() instead. We can’t right now because
        // that would require the DimAdd bound (but here we use DimNameAdd).
        // This should be fixable once Rust gets a more complete support of
        // const-generics.
        let mut v = OVector::from_element(T::one());
        for i in 0..D {
            v[i] = self.vector[i].clone();
        }
        OMatrix::from_diagonal(&v)
    }

    /// Attempts to invert `self` in place, returning `false` if any scale component is zero.
    ///
    /// # Example
    /// ```
    /// # use nalgebra::{Scale2, Scale3};
    /// let t = Scale3::new(1.0, 2.0, 3.0);
    /// let mut inv_t = Scale3::new(1.0, 2.0, 3.0);
    /// assert!(inv_t.try_inverse_mut());
    /// assert_eq!(t * inv_t, Scale3::identity());
    /// assert_eq!(inv_t * t, Scale3::identity());
    ///
    /// // Works in all dimensions.
    /// let t = Scale2::new(1.0, 2.0);
    /// let mut inv_t = Scale2::new(1.0, 2.0);
    /// assert!(inv_t.try_inverse_mut());
    /// assert_eq!(t * inv_t, Scale2::identity());
    /// assert_eq!(inv_t * t, Scale2::identity());
    ///
    /// // Does not perform any operation if a coordinate is 0.
    /// let mut t = Scale2::new(0.0, 2.0);
    /// assert!(!t.try_inverse_mut());
    /// ```
    #[inline]
    pub fn try_inverse_mut(&mut self) -> bool
    where
        T: ClosedDivAssign + One + Zero,
    {
        match self.try_inverse() {
            Some(v) => {
                self.vector = v.vector;
                true
            }
            None => false,
        }
    }
}

impl<T: Scalar + ClosedMulAssign, const D: usize> Scale<T, D> {
    /// Scales the given point.
    ///
    /// This is the same as the multiplication `self * pt`.
    ///
    /// # Example
    /// ```
    /// # use nalgebra::{Scale3, Point3};
    /// let t = Scale3::new(1.0, 2.0, 3.0);
    /// let transformed_point = t.transform_point(&Point3::new(4.0, 5.0, 6.0));
    /// assert_eq!(transformed_point, Point3::new(4.0, 10.0, 18.0));
    /// ```
    #[inline]
    #[must_use]
    pub fn transform_point(&self, pt: &Point<T, D>) -> Point<T, D> {
        self * pt
    }
}

impl<T: Scalar + ClosedDivAssign + ClosedMulAssign + One + Zero, const D: usize> Scale<T, D> {
    /// Scales the given point by the inverse of this Scale.
    ///
    /// # Example
    /// ```
    /// # use nalgebra::{Scale3, Point3};
    /// let t = Scale3::new(1.0, 2.0, 3.0);
    /// let transformed_point = t.try_inverse_transform_point(&Point3::new(4.0, 6.0, 6.0)).unwrap();
    /// assert_eq!(transformed_point, Point3::new(4.0, 3.0, 2.0));
    ///
    /// // Returns `None` if the inverse doesn't exist.
    /// let t = Scale3::new(1.0, 0.0, 3.0);
    /// let transformed_point = t.try_inverse_transform_point(&Point3::new(4.0, 6.0, 6.0));
    /// assert_eq!(transformed_point, None);
    /// ```
    #[inline]
    #[must_use]
    pub fn try_inverse_transform_point(&self, pt: &Point<T, D>) -> Option<Point<T, D>> {
        self.try_inverse().map(|s| s * pt)
    }
}

impl<T: Scalar + Eq, const D: usize> Eq for Scale<T, D> {}

impl<T: Scalar + PartialEq, const D: usize> PartialEq for Scale<T, D> {
    #[inline]
    fn eq(&self, right: &Scale<T, D>) -> bool {
        self.vector == right.vector
    }
}

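/// Approximate comparison delegates to the underlying `vector`; an illustrative
/// sketch using the `approx` macros (mirrors the `abs_diff_eq` call below):
///
/// ```
/// # use nalgebra::Scale2;
/// let a = Scale2::new(1.0, 2.0);
/// let b = Scale2::new(1.0, 2.0 + 1.0e-12);
/// assert!(approx::abs_diff_eq!(a, b, epsilon = 1.0e-9));
/// ```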
impl<T: Scalar + AbsDiffEq, const D: usize> AbsDiffEq for Scale<T, D>
where
    T::Epsilon: Clone,
{
    type Epsilon = T::Epsilon;

    #[inline]
    fn default_epsilon() -> Self::Epsilon {
        T::default_epsilon()
    }

    #[inline]
    fn abs_diff_eq(&self, other: &Self, epsilon: Self::Epsilon) -> bool {
        self.vector.abs_diff_eq(&other.vector, epsilon)
    }
}

impl<T: Scalar + RelativeEq, const D: usize> RelativeEq for Scale<T, D>
where
    T::Epsilon: Clone,
{
    #[inline]
    fn default_max_relative() -> Self::Epsilon {
        T::default_max_relative()
    }

    #[inline]
    fn relative_eq(
        &self,
        other: &Self,
        epsilon: Self::Epsilon,
        max_relative: Self::Epsilon,
    ) -> bool {
        self.vector
            .relative_eq(&other.vector, epsilon, max_relative)
    }
}

impl<T: Scalar + UlpsEq, const D: usize> UlpsEq for Scale<T, D>
where
    T::Epsilon: Clone,
{
    #[inline]
    fn default_max_ulps() -> u32 {
        T::default_max_ulps()
    }

    #[inline]
    fn ulps_eq(&self, other: &Self, epsilon: Self::Epsilon, max_ulps: u32) -> bool {
        self.vector.ulps_eq(&other.vector, epsilon, max_ulps)
    }
}

/*
 *
 * Display
 *
 */
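/// Displays a scale as `Scale { ... }` wrapping its underlying vector; an
/// illustrative sketch (the prefix follows from the `writeln!` call below):
///
/// ```
/// # use nalgebra::Scale2;
/// let s = Scale2::new(1.0, 2.0);
/// assert!(format!("{}", s).starts_with("Scale {"));
/// ```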
impl<T: Scalar + fmt::Display, const D: usize> fmt::Display for Scale<T, D> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let precision = f.precision().unwrap_or(3);

        writeln!(f, "Scale {{")?;
        write!(f, "{:.*}", precision, self.vector)?;
        writeln!(f, "}}")
    }
}