glam/f32/sse2/
vec4.rs

1// Generated from vec.rs.tera template. Edit the template, not the generated file.
2
3use crate::{f32::math, sse2::*, BVec4, BVec4A, Vec2, Vec3, Vec3A};
4
5use core::fmt;
6use core::iter::{Product, Sum};
7use core::{f32, ops::*};
8
9#[cfg(target_arch = "x86")]
10use core::arch::x86::*;
11#[cfg(target_arch = "x86_64")]
12use core::arch::x86_64::*;
13
/// Bit-cast helper used to build a [`Vec4`] (an `__m128`) from four `f32`s
/// inside `const fn`s, where SIMD load intrinsics cannot be called.
#[repr(C)]
union UnionCast {
    a: [f32; 4],
    v: Vec4,
}
19
/// Creates a 4-dimensional vector.
///
/// Shorthand for [`Vec4::new`].
#[inline(always)]
#[must_use]
pub const fn vec4(x: f32, y: f32, z: f32, w: f32) -> Vec4 {
    Vec4::new(x, y, z, w)
}
26
/// A 4-dimensional vector.
///
/// SIMD vector types are used for storage on supported platforms.
///
/// This type is 16 byte aligned.
#[derive(Clone, Copy)]
#[repr(transparent)]
// Newtype over the raw SSE register type; `repr(transparent)` guarantees the
// same layout/ABI as `__m128` itself.
pub struct Vec4(pub(crate) __m128);
35
36impl Vec4 {
37    /// All zeroes.
38    pub const ZERO: Self = Self::splat(0.0);
39
40    /// All ones.
41    pub const ONE: Self = Self::splat(1.0);
42
43    /// All negative ones.
44    pub const NEG_ONE: Self = Self::splat(-1.0);
45
46    /// All `f32::MIN`.
47    pub const MIN: Self = Self::splat(f32::MIN);
48
49    /// All `f32::MAX`.
50    pub const MAX: Self = Self::splat(f32::MAX);
51
52    /// All `f32::NAN`.
53    pub const NAN: Self = Self::splat(f32::NAN);
54
55    /// All `f32::INFINITY`.
56    pub const INFINITY: Self = Self::splat(f32::INFINITY);
57
58    /// All `f32::NEG_INFINITY`.
59    pub const NEG_INFINITY: Self = Self::splat(f32::NEG_INFINITY);
60
61    /// A unit vector pointing along the positive X axis.
62    pub const X: Self = Self::new(1.0, 0.0, 0.0, 0.0);
63
64    /// A unit vector pointing along the positive Y axis.
65    pub const Y: Self = Self::new(0.0, 1.0, 0.0, 0.0);
66
67    /// A unit vector pointing along the positive Z axis.
68    pub const Z: Self = Self::new(0.0, 0.0, 1.0, 0.0);
69
70    /// A unit vector pointing along the positive W axis.
71    pub const W: Self = Self::new(0.0, 0.0, 0.0, 1.0);
72
73    /// A unit vector pointing along the negative X axis.
74    pub const NEG_X: Self = Self::new(-1.0, 0.0, 0.0, 0.0);
75
76    /// A unit vector pointing along the negative Y axis.
77    pub const NEG_Y: Self = Self::new(0.0, -1.0, 0.0, 0.0);
78
79    /// A unit vector pointing along the negative Z axis.
80    pub const NEG_Z: Self = Self::new(0.0, 0.0, -1.0, 0.0);
81
82    /// A unit vector pointing along the negative W axis.
83    pub const NEG_W: Self = Self::new(0.0, 0.0, 0.0, -1.0);
84
85    /// The unit axes.
86    pub const AXES: [Self; 4] = [Self::X, Self::Y, Self::Z, Self::W];
87
88    /// Creates a new vector.
89    #[inline(always)]
90    #[must_use]
91    pub const fn new(x: f32, y: f32, z: f32, w: f32) -> Self {
92        unsafe { UnionCast { a: [x, y, z, w] }.v }
93    }
94
95    /// Creates a vector with all elements set to `v`.
96    #[inline]
97    #[must_use]
98    pub const fn splat(v: f32) -> Self {
99        unsafe { UnionCast { a: [v; 4] }.v }
100    }
101
102    /// Returns a vector containing each element of `self` modified by a mapping function `f`.
103    #[inline]
104    #[must_use]
105    pub fn map<F>(self, f: F) -> Self
106    where
107        F: Fn(f32) -> f32,
108    {
109        Self::new(f(self.x), f(self.y), f(self.z), f(self.w))
110    }
111
112    /// Creates a vector from the elements in `if_true` and `if_false`, selecting which to use
113    /// for each element of `self`.
114    ///
115    /// A true element in the mask uses the corresponding element from `if_true`, and false
116    /// uses the element from `if_false`.
117    #[inline]
118    #[must_use]
119    pub fn select(mask: BVec4A, if_true: Self, if_false: Self) -> Self {
120        Self(unsafe {
121            _mm_or_ps(
122                _mm_andnot_ps(mask.0, if_false.0),
123                _mm_and_ps(if_true.0, mask.0),
124            )
125        })
126    }
127
128    /// Creates a new vector from an array.
129    #[inline]
130    #[must_use]
131    pub const fn from_array(a: [f32; 4]) -> Self {
132        Self::new(a[0], a[1], a[2], a[3])
133    }
134
135    /// `[x, y, z, w]`
136    #[inline]
137    #[must_use]
138    pub const fn to_array(&self) -> [f32; 4] {
139        unsafe { *(self as *const Vec4 as *const [f32; 4]) }
140    }
141
142    /// Creates a vector from the first 4 values in `slice`.
143    ///
144    /// # Panics
145    ///
146    /// Panics if `slice` is less than 4 elements long.
147    #[inline]
148    #[must_use]
149    pub const fn from_slice(slice: &[f32]) -> Self {
150        assert!(slice.len() >= 4);
151        Self::new(slice[0], slice[1], slice[2], slice[3])
152    }
153
154    /// Writes the elements of `self` to the first 4 elements in `slice`.
155    ///
156    /// # Panics
157    ///
158    /// Panics if `slice` is less than 4 elements long.
159    #[inline]
160    pub fn write_to_slice(self, slice: &mut [f32]) {
161        assert!(slice.len() >= 4);
162        unsafe {
163            _mm_storeu_ps(slice.as_mut_ptr(), self.0);
164        }
165    }
166
167    /// Creates a 3D vector from the `x`, `y` and `z` elements of `self`, discarding `w`.
168    ///
169    /// Truncation to [`Vec3`] may also be performed by using [`self.xyz()`][crate::swizzles::Vec4Swizzles::xyz()].
170    ///
171    /// To truncate to [`Vec3A`] use [`Vec3A::from()`].
172    #[inline]
173    #[must_use]
174    pub fn truncate(self) -> Vec3 {
175        use crate::swizzles::Vec4Swizzles;
176        self.xyz()
177    }
178
179    /// Creates a 4D vector from `self` with the given value of `x`.
180    #[inline]
181    #[must_use]
182    pub fn with_x(mut self, x: f32) -> Self {
183        self.x = x;
184        self
185    }
186
187    /// Creates a 4D vector from `self` with the given value of `y`.
188    #[inline]
189    #[must_use]
190    pub fn with_y(mut self, y: f32) -> Self {
191        self.y = y;
192        self
193    }
194
195    /// Creates a 4D vector from `self` with the given value of `z`.
196    #[inline]
197    #[must_use]
198    pub fn with_z(mut self, z: f32) -> Self {
199        self.z = z;
200        self
201    }
202
203    /// Creates a 4D vector from `self` with the given value of `w`.
204    #[inline]
205    #[must_use]
206    pub fn with_w(mut self, w: f32) -> Self {
207        self.w = w;
208        self
209    }
210
211    /// Computes the dot product of `self` and `rhs`.
212    #[inline]
213    #[must_use]
214    pub fn dot(self, rhs: Self) -> f32 {
215        unsafe { dot4(self.0, rhs.0) }
216    }
217
218    /// Returns a vector where every component is the dot product of `self` and `rhs`.
219    #[inline]
220    #[must_use]
221    pub fn dot_into_vec(self, rhs: Self) -> Self {
222        Self(unsafe { dot4_into_m128(self.0, rhs.0) })
223    }
224
225    /// Returns a vector containing the minimum values for each element of `self` and `rhs`.
226    ///
227    /// In other words this computes `[self.x.min(rhs.x), self.y.min(rhs.y), ..]`.
228    #[inline]
229    #[must_use]
230    pub fn min(self, rhs: Self) -> Self {
231        Self(unsafe { _mm_min_ps(self.0, rhs.0) })
232    }
233
234    /// Returns a vector containing the maximum values for each element of `self` and `rhs`.
235    ///
236    /// In other words this computes `[self.x.max(rhs.x), self.y.max(rhs.y), ..]`.
237    #[inline]
238    #[must_use]
239    pub fn max(self, rhs: Self) -> Self {
240        Self(unsafe { _mm_max_ps(self.0, rhs.0) })
241    }
242
243    /// Component-wise clamping of values, similar to [`f32::clamp`].
244    ///
245    /// Each element in `min` must be less-or-equal to the corresponding element in `max`.
246    ///
247    /// # Panics
248    ///
249    /// Will panic if `min` is greater than `max` when `glam_assert` is enabled.
250    #[inline]
251    #[must_use]
252    pub fn clamp(self, min: Self, max: Self) -> Self {
253        glam_assert!(min.cmple(max).all(), "clamp: expected min <= max");
254        self.max(min).min(max)
255    }
256
257    /// Returns the horizontal minimum of `self`.
258    ///
259    /// In other words this computes `min(x, y, ..)`.
260    #[inline]
261    #[must_use]
262    pub fn min_element(self) -> f32 {
263        unsafe {
264            let v = self.0;
265            let v = _mm_min_ps(v, _mm_shuffle_ps(v, v, 0b00_00_11_10));
266            let v = _mm_min_ps(v, _mm_shuffle_ps(v, v, 0b00_00_00_01));
267            _mm_cvtss_f32(v)
268        }
269    }
270
271    /// Returns the horizontal maximum of `self`.
272    ///
273    /// In other words this computes `max(x, y, ..)`.
274    #[inline]
275    #[must_use]
276    pub fn max_element(self) -> f32 {
277        unsafe {
278            let v = self.0;
279            let v = _mm_max_ps(v, _mm_shuffle_ps(v, v, 0b00_00_11_10));
280            let v = _mm_max_ps(v, _mm_shuffle_ps(v, v, 0b00_00_00_01));
281            _mm_cvtss_f32(v)
282        }
283    }
284
285    /// Returns the sum of all elements of `self`.
286    ///
287    /// In other words, this computes `self.x + self.y + ..`.
288    #[inline]
289    #[must_use]
290    pub fn element_sum(self) -> f32 {
291        unsafe {
292            let v = self.0;
293            let v = _mm_add_ps(v, _mm_shuffle_ps(v, v, 0b00_11_00_01));
294            let v = _mm_add_ps(v, _mm_shuffle_ps(v, v, 0b00_00_00_10));
295            _mm_cvtss_f32(v)
296        }
297    }
298
299    /// Returns the product of all elements of `self`.
300    ///
301    /// In other words, this computes `self.x * self.y * ..`.
302    #[inline]
303    #[must_use]
304    pub fn element_product(self) -> f32 {
305        unsafe {
306            let v = self.0;
307            let v = _mm_mul_ps(v, _mm_shuffle_ps(v, v, 0b00_11_00_01));
308            let v = _mm_mul_ps(v, _mm_shuffle_ps(v, v, 0b00_00_00_10));
309            _mm_cvtss_f32(v)
310        }
311    }
312
313    /// Returns a vector mask containing the result of a `==` comparison for each element of
314    /// `self` and `rhs`.
315    ///
316    /// In other words, this computes `[self.x == rhs.x, self.y == rhs.y, ..]` for all
317    /// elements.
318    #[inline]
319    #[must_use]
320    pub fn cmpeq(self, rhs: Self) -> BVec4A {
321        BVec4A(unsafe { _mm_cmpeq_ps(self.0, rhs.0) })
322    }
323
324    /// Returns a vector mask containing the result of a `!=` comparison for each element of
325    /// `self` and `rhs`.
326    ///
327    /// In other words this computes `[self.x != rhs.x, self.y != rhs.y, ..]` for all
328    /// elements.
329    #[inline]
330    #[must_use]
331    pub fn cmpne(self, rhs: Self) -> BVec4A {
332        BVec4A(unsafe { _mm_cmpneq_ps(self.0, rhs.0) })
333    }
334
335    /// Returns a vector mask containing the result of a `>=` comparison for each element of
336    /// `self` and `rhs`.
337    ///
338    /// In other words this computes `[self.x >= rhs.x, self.y >= rhs.y, ..]` for all
339    /// elements.
340    #[inline]
341    #[must_use]
342    pub fn cmpge(self, rhs: Self) -> BVec4A {
343        BVec4A(unsafe { _mm_cmpge_ps(self.0, rhs.0) })
344    }
345
346    /// Returns a vector mask containing the result of a `>` comparison for each element of
347    /// `self` and `rhs`.
348    ///
349    /// In other words this computes `[self.x > rhs.x, self.y > rhs.y, ..]` for all
350    /// elements.
351    #[inline]
352    #[must_use]
353    pub fn cmpgt(self, rhs: Self) -> BVec4A {
354        BVec4A(unsafe { _mm_cmpgt_ps(self.0, rhs.0) })
355    }
356
357    /// Returns a vector mask containing the result of a `<=` comparison for each element of
358    /// `self` and `rhs`.
359    ///
360    /// In other words this computes `[self.x <= rhs.x, self.y <= rhs.y, ..]` for all
361    /// elements.
362    #[inline]
363    #[must_use]
364    pub fn cmple(self, rhs: Self) -> BVec4A {
365        BVec4A(unsafe { _mm_cmple_ps(self.0, rhs.0) })
366    }
367
368    /// Returns a vector mask containing the result of a `<` comparison for each element of
369    /// `self` and `rhs`.
370    ///
371    /// In other words this computes `[self.x < rhs.x, self.y < rhs.y, ..]` for all
372    /// elements.
373    #[inline]
374    #[must_use]
375    pub fn cmplt(self, rhs: Self) -> BVec4A {
376        BVec4A(unsafe { _mm_cmplt_ps(self.0, rhs.0) })
377    }
378
379    /// Returns a vector containing the absolute value of each element of `self`.
380    #[inline]
381    #[must_use]
382    pub fn abs(self) -> Self {
383        Self(unsafe { crate::sse2::m128_abs(self.0) })
384    }
385
386    /// Returns a vector with elements representing the sign of `self`.
387    ///
388    /// - `1.0` if the number is positive, `+0.0` or `INFINITY`
389    /// - `-1.0` if the number is negative, `-0.0` or `NEG_INFINITY`
390    /// - `NAN` if the number is `NAN`
391    #[inline]
392    #[must_use]
393    pub fn signum(self) -> Self {
394        let result = Self(unsafe { _mm_or_ps(_mm_and_ps(self.0, Self::NEG_ONE.0), Self::ONE.0) });
395        let mask = self.is_nan_mask();
396        Self::select(mask, self, result)
397    }
398
399    /// Returns a vector with signs of `rhs` and the magnitudes of `self`.
400    #[inline]
401    #[must_use]
402    pub fn copysign(self, rhs: Self) -> Self {
403        let mask = Self::splat(-0.0);
404        Self(unsafe { _mm_or_ps(_mm_and_ps(rhs.0, mask.0), _mm_andnot_ps(mask.0, self.0)) })
405    }
406
407    /// Returns a bitmask with the lowest 4 bits set to the sign bits from the elements of `self`.
408    ///
409    /// A negative element results in a `1` bit and a positive element in a `0` bit.  Element `x` goes
410    /// into the first lowest bit, element `y` into the second, etc.
411    #[inline]
412    #[must_use]
413    pub fn is_negative_bitmask(self) -> u32 {
414        unsafe { _mm_movemask_ps(self.0) as u32 }
415    }
416
417    /// Returns `true` if, and only if, all elements are finite.  If any element is either
418    /// `NaN`, positive or negative infinity, this will return `false`.
419    #[inline]
420    #[must_use]
421    pub fn is_finite(self) -> bool {
422        self.is_finite_mask().all()
423    }
424
425    /// Performs `is_finite` on each element of self, returning a vector mask of the results.
426    ///
427    /// In other words, this computes `[x.is_finite(), y.is_finite(), ...]`.
428    pub fn is_finite_mask(self) -> BVec4A {
429        BVec4A(unsafe { _mm_cmplt_ps(crate::sse2::m128_abs(self.0), Self::INFINITY.0) })
430    }
431
432    /// Returns `true` if any elements are `NaN`.
433    #[inline]
434    #[must_use]
435    pub fn is_nan(self) -> bool {
436        self.is_nan_mask().any()
437    }
438
439    /// Performs `is_nan` on each element of self, returning a vector mask of the results.
440    ///
441    /// In other words, this computes `[x.is_nan(), y.is_nan(), ...]`.
442    #[inline]
443    #[must_use]
444    pub fn is_nan_mask(self) -> BVec4A {
445        BVec4A(unsafe { _mm_cmpunord_ps(self.0, self.0) })
446    }
447
448    /// Computes the length of `self`.
449    #[doc(alias = "magnitude")]
450    #[inline]
451    #[must_use]
452    pub fn length(self) -> f32 {
453        unsafe {
454            let dot = dot4_in_x(self.0, self.0);
455            _mm_cvtss_f32(_mm_sqrt_ps(dot))
456        }
457    }
458
459    /// Computes the squared length of `self`.
460    ///
461    /// This is faster than `length()` as it avoids a square root operation.
462    #[doc(alias = "magnitude2")]
463    #[inline]
464    #[must_use]
465    pub fn length_squared(self) -> f32 {
466        self.dot(self)
467    }
468
469    /// Computes `1.0 / length()`.
470    ///
471    /// For valid results, `self` must _not_ be of length zero.
472    #[inline]
473    #[must_use]
474    pub fn length_recip(self) -> f32 {
475        unsafe {
476            let dot = dot4_in_x(self.0, self.0);
477            _mm_cvtss_f32(_mm_div_ps(Self::ONE.0, _mm_sqrt_ps(dot)))
478        }
479    }
480
481    /// Computes the Euclidean distance between two points in space.
482    #[inline]
483    #[must_use]
484    pub fn distance(self, rhs: Self) -> f32 {
485        (self - rhs).length()
486    }
487
488    /// Compute the squared euclidean distance between two points in space.
489    #[inline]
490    #[must_use]
491    pub fn distance_squared(self, rhs: Self) -> f32 {
492        (self - rhs).length_squared()
493    }
494
495    /// Returns the element-wise quotient of [Euclidean division] of `self` by `rhs`.
496    #[inline]
497    #[must_use]
498    pub fn div_euclid(self, rhs: Self) -> Self {
499        Self::new(
500            math::div_euclid(self.x, rhs.x),
501            math::div_euclid(self.y, rhs.y),
502            math::div_euclid(self.z, rhs.z),
503            math::div_euclid(self.w, rhs.w),
504        )
505    }
506
507    /// Returns the element-wise remainder of [Euclidean division] of `self` by `rhs`.
508    ///
509    /// [Euclidean division]: f32::rem_euclid
510    #[inline]
511    #[must_use]
512    pub fn rem_euclid(self, rhs: Self) -> Self {
513        Self::new(
514            math::rem_euclid(self.x, rhs.x),
515            math::rem_euclid(self.y, rhs.y),
516            math::rem_euclid(self.z, rhs.z),
517            math::rem_euclid(self.w, rhs.w),
518        )
519    }
520
521    /// Returns `self` normalized to length 1.0.
522    ///
523    /// For valid results, `self` must be finite and _not_ of length zero, nor very close to zero.
524    ///
525    /// See also [`Self::try_normalize()`] and [`Self::normalize_or_zero()`].
526    ///
527    /// Panics
528    ///
529    /// Will panic if the resulting normalized vector is not finite when `glam_assert` is enabled.
530    #[inline]
531    #[must_use]
532    pub fn normalize(self) -> Self {
533        unsafe {
534            let length = _mm_sqrt_ps(dot4_into_m128(self.0, self.0));
535            #[allow(clippy::let_and_return)]
536            let normalized = Self(_mm_div_ps(self.0, length));
537            glam_assert!(normalized.is_finite());
538            normalized
539        }
540    }
541
542    /// Returns `self` normalized to length 1.0 if possible, else returns `None`.
543    ///
544    /// In particular, if the input is zero (or very close to zero), or non-finite,
545    /// the result of this operation will be `None`.
546    ///
547    /// See also [`Self::normalize_or_zero()`].
548    #[inline]
549    #[must_use]
550    pub fn try_normalize(self) -> Option<Self> {
551        let rcp = self.length_recip();
552        if rcp.is_finite() && rcp > 0.0 {
553            Some(self * rcp)
554        } else {
555            None
556        }
557    }
558
559    /// Returns `self` normalized to length 1.0 if possible, else returns a
560    /// fallback value.
561    ///
562    /// In particular, if the input is zero (or very close to zero), or non-finite,
563    /// the result of this operation will be the fallback value.
564    ///
565    /// See also [`Self::try_normalize()`].
566    #[inline]
567    #[must_use]
568    pub fn normalize_or(self, fallback: Self) -> Self {
569        let rcp = self.length_recip();
570        if rcp.is_finite() && rcp > 0.0 {
571            self * rcp
572        } else {
573            fallback
574        }
575    }
576
577    /// Returns `self` normalized to length 1.0 if possible, else returns zero.
578    ///
579    /// In particular, if the input is zero (or very close to zero), or non-finite,
580    /// the result of this operation will be zero.
581    ///
582    /// See also [`Self::try_normalize()`].
583    #[inline]
584    #[must_use]
585    pub fn normalize_or_zero(self) -> Self {
586        self.normalize_or(Self::ZERO)
587    }
588
589    /// Returns whether `self` is length `1.0` or not.
590    ///
591    /// Uses a precision threshold of approximately `1e-4`.
592    #[inline]
593    #[must_use]
594    pub fn is_normalized(self) -> bool {
595        math::abs(self.length_squared() - 1.0) <= 2e-4
596    }
597
598    /// Returns the vector projection of `self` onto `rhs`.
599    ///
600    /// `rhs` must be of non-zero length.
601    ///
602    /// # Panics
603    ///
604    /// Will panic if `rhs` is zero length when `glam_assert` is enabled.
605    #[inline]
606    #[must_use]
607    pub fn project_onto(self, rhs: Self) -> Self {
608        let other_len_sq_rcp = rhs.dot(rhs).recip();
609        glam_assert!(other_len_sq_rcp.is_finite());
610        rhs * self.dot(rhs) * other_len_sq_rcp
611    }
612
613    /// Returns the vector rejection of `self` from `rhs`.
614    ///
615    /// The vector rejection is the vector perpendicular to the projection of `self` onto
616    /// `rhs`, in rhs words the result of `self - self.project_onto(rhs)`.
617    ///
618    /// `rhs` must be of non-zero length.
619    ///
620    /// # Panics
621    ///
622    /// Will panic if `rhs` has a length of zero when `glam_assert` is enabled.
623    #[doc(alias("plane"))]
624    #[inline]
625    #[must_use]
626    pub fn reject_from(self, rhs: Self) -> Self {
627        self - self.project_onto(rhs)
628    }
629
630    /// Returns the vector projection of `self` onto `rhs`.
631    ///
632    /// `rhs` must be normalized.
633    ///
634    /// # Panics
635    ///
636    /// Will panic if `rhs` is not normalized when `glam_assert` is enabled.
637    #[inline]
638    #[must_use]
639    pub fn project_onto_normalized(self, rhs: Self) -> Self {
640        glam_assert!(rhs.is_normalized());
641        rhs * self.dot(rhs)
642    }
643
644    /// Returns the vector rejection of `self` from `rhs`.
645    ///
646    /// The vector rejection is the vector perpendicular to the projection of `self` onto
647    /// `rhs`, in rhs words the result of `self - self.project_onto(rhs)`.
648    ///
649    /// `rhs` must be normalized.
650    ///
651    /// # Panics
652    ///
653    /// Will panic if `rhs` is not normalized when `glam_assert` is enabled.
654    #[doc(alias("plane"))]
655    #[inline]
656    #[must_use]
657    pub fn reject_from_normalized(self, rhs: Self) -> Self {
658        self - self.project_onto_normalized(rhs)
659    }
660
661    /// Returns a vector containing the nearest integer to a number for each element of `self`.
662    /// Round half-way cases away from 0.0.
663    #[inline]
664    #[must_use]
665    pub fn round(self) -> Self {
666        Self(unsafe { m128_round(self.0) })
667    }
668
669    /// Returns a vector containing the largest integer less than or equal to a number for each
670    /// element of `self`.
671    #[inline]
672    #[must_use]
673    pub fn floor(self) -> Self {
674        Self(unsafe { m128_floor(self.0) })
675    }
676
677    /// Returns a vector containing the smallest integer greater than or equal to a number for
678    /// each element of `self`.
679    #[inline]
680    #[must_use]
681    pub fn ceil(self) -> Self {
682        Self(unsafe { m128_ceil(self.0) })
683    }
684
685    /// Returns a vector containing the integer part each element of `self`. This means numbers are
686    /// always truncated towards zero.
687    #[inline]
688    #[must_use]
689    pub fn trunc(self) -> Self {
690        Self(unsafe { m128_trunc(self.0) })
691    }
692
693    /// Returns a vector containing the fractional part of the vector as `self - self.trunc()`.
694    ///
695    /// Note that this differs from the GLSL implementation of `fract` which returns
696    /// `self - self.floor()`.
697    ///
698    /// Note that this is fast but not precise for large numbers.
699    #[inline]
700    #[must_use]
701    pub fn fract(self) -> Self {
702        self - self.trunc()
703    }
704
705    /// Returns a vector containing the fractional part of the vector as `self - self.floor()`.
706    ///
707    /// Note that this differs from the Rust implementation of `fract` which returns
708    /// `self - self.trunc()`.
709    ///
710    /// Note that this is fast but not precise for large numbers.
711    #[inline]
712    #[must_use]
713    pub fn fract_gl(self) -> Self {
714        self - self.floor()
715    }
716
717    /// Returns a vector containing `e^self` (the exponential function) for each element of
718    /// `self`.
719    #[inline]
720    #[must_use]
721    pub fn exp(self) -> Self {
722        Self::new(
723            math::exp(self.x),
724            math::exp(self.y),
725            math::exp(self.z),
726            math::exp(self.w),
727        )
728    }
729
730    /// Returns a vector containing each element of `self` raised to the power of `n`.
731    #[inline]
732    #[must_use]
733    pub fn powf(self, n: f32) -> Self {
734        Self::new(
735            math::powf(self.x, n),
736            math::powf(self.y, n),
737            math::powf(self.z, n),
738            math::powf(self.w, n),
739        )
740    }
741
742    /// Returns a vector containing the reciprocal `1.0/n` of each element of `self`.
743    #[inline]
744    #[must_use]
745    pub fn recip(self) -> Self {
746        Self(unsafe { _mm_div_ps(Self::ONE.0, self.0) })
747    }
748
749    /// Performs a linear interpolation between `self` and `rhs` based on the value `s`.
750    ///
751    /// When `s` is `0.0`, the result will be equal to `self`.  When `s` is `1.0`, the result
752    /// will be equal to `rhs`. When `s` is outside of range `[0, 1]`, the result is linearly
753    /// extrapolated.
754    #[doc(alias = "mix")]
755    #[inline]
756    #[must_use]
757    pub fn lerp(self, rhs: Self, s: f32) -> Self {
758        self * (1.0 - s) + rhs * s
759    }
760
761    /// Moves towards `rhs` based on the value `d`.
762    ///
763    /// When `d` is `0.0`, the result will be equal to `self`. When `d` is equal to
764    /// `self.distance(rhs)`, the result will be equal to `rhs`. Will not go past `rhs`.
765    #[inline]
766    #[must_use]
767    pub fn move_towards(&self, rhs: Self, d: f32) -> Self {
768        let a = rhs - *self;
769        let len = a.length();
770        if len <= d || len <= 1e-4 {
771            return rhs;
772        }
773        *self + a / len * d
774    }
775
776    /// Calculates the midpoint between `self` and `rhs`.
777    ///
778    /// The midpoint is the average of, or halfway point between, two vectors.
779    /// `a.midpoint(b)` should yield the same result as `a.lerp(b, 0.5)`
780    /// while being slightly cheaper to compute.
781    #[inline]
782    pub fn midpoint(self, rhs: Self) -> Self {
783        (self + rhs) * 0.5
784    }
785
786    /// Returns true if the absolute difference of all elements between `self` and `rhs` is
787    /// less than or equal to `max_abs_diff`.
788    ///
789    /// This can be used to compare if two vectors contain similar elements. It works best when
790    /// comparing with a known value. The `max_abs_diff` that should be used used depends on
791    /// the values being compared against.
792    ///
793    /// For more see
794    /// [comparing floating point numbers](https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/).
795    #[inline]
796    #[must_use]
797    pub fn abs_diff_eq(self, rhs: Self, max_abs_diff: f32) -> bool {
798        self.sub(rhs).abs().cmple(Self::splat(max_abs_diff)).all()
799    }
800
801    /// Returns a vector with a length no less than `min` and no more than `max`.
802    ///
803    /// # Panics
804    ///
805    /// Will panic if `min` is greater than `max`, or if either `min` or `max` is negative, when `glam_assert` is enabled.
806    #[inline]
807    #[must_use]
808    pub fn clamp_length(self, min: f32, max: f32) -> Self {
809        glam_assert!(0.0 <= min);
810        glam_assert!(min <= max);
811        let length_sq = self.length_squared();
812        if length_sq < min * min {
813            min * (self / math::sqrt(length_sq))
814        } else if length_sq > max * max {
815            max * (self / math::sqrt(length_sq))
816        } else {
817            self
818        }
819    }
820
821    /// Returns a vector with a length no more than `max`.
822    ///
823    /// # Panics
824    ///
825    /// Will panic if `max` is negative when `glam_assert` is enabled.
826    #[inline]
827    #[must_use]
828    pub fn clamp_length_max(self, max: f32) -> Self {
829        glam_assert!(0.0 <= max);
830        let length_sq = self.length_squared();
831        if length_sq > max * max {
832            max * (self / math::sqrt(length_sq))
833        } else {
834            self
835        }
836    }
837
838    /// Returns a vector with a length no less than `min`.
839    ///
840    /// # Panics
841    ///
842    /// Will panic if `min` is negative when `glam_assert` is enabled.
843    #[inline]
844    #[must_use]
845    pub fn clamp_length_min(self, min: f32) -> Self {
846        glam_assert!(0.0 <= min);
847        let length_sq = self.length_squared();
848        if length_sq < min * min {
849            min * (self / math::sqrt(length_sq))
850        } else {
851            self
852        }
853    }
854
855    /// Fused multiply-add. Computes `(self * a) + b` element-wise with only one rounding
856    /// error, yielding a more accurate result than an unfused multiply-add.
857    ///
858    /// Using `mul_add` *may* be more performant than an unfused multiply-add if the target
859    /// architecture has a dedicated fma CPU instruction. However, this is not always true,
860    /// and will be heavily dependant on designing algorithms with specific target hardware in
861    /// mind.
862    #[inline]
863    #[must_use]
864    pub fn mul_add(self, a: Self, b: Self) -> Self {
865        #[cfg(target_feature = "fma")]
866        unsafe {
867            Self(_mm_fmadd_ps(self.0, a.0, b.0))
868        }
869        #[cfg(not(target_feature = "fma"))]
870        Self::new(
871            math::mul_add(self.x, a.x, b.x),
872            math::mul_add(self.y, a.y, b.y),
873            math::mul_add(self.z, a.z, b.z),
874            math::mul_add(self.w, a.w, b.w),
875        )
876    }
877
878    /// Returns the reflection vector for a given incident vector `self` and surface normal
879    /// `normal`.
880    ///
881    /// `normal` must be normalized.
882    ///
883    /// # Panics
884    ///
885    /// Will panic if `normal` is not normalized when `glam_assert` is enabled.
886    #[inline]
887    #[must_use]
888    pub fn reflect(self, normal: Self) -> Self {
889        glam_assert!(normal.is_normalized());
890        self - 2.0 * self.dot(normal) * normal
891    }
892
893    /// Returns the refraction direction for a given incident vector `self`, surface normal
894    /// `normal` and ratio of indices of refraction, `eta`. When total internal reflection occurs,
895    /// a zero vector will be returned.
896    ///
897    /// `self` and `normal` must be normalized.
898    ///
899    /// # Panics
900    ///
901    /// Will panic if `self` or `normal` is not normalized when `glam_assert` is enabled.
902    #[inline]
903    #[must_use]
904    pub fn refract(self, normal: Self, eta: f32) -> Self {
905        glam_assert!(self.is_normalized());
906        glam_assert!(normal.is_normalized());
907        let n_dot_i = normal.dot(self);
908        let k = 1.0 - eta * eta * (1.0 - n_dot_i * n_dot_i);
909        if k >= 0.0 {
910            eta * self - (eta * n_dot_i + math::sqrt(k)) * normal
911        } else {
912            Self::ZERO
913        }
914    }
915
    // Lane-by-lane numeric conversions. For the float-to-integer casts below,
    // Rust's `as` conversion saturates at the target type's bounds and maps
    // NaN to 0, so no input value can cause undefined behavior.
    /// Casts all elements of `self` to `f64`.
    #[inline]
    #[must_use]
    pub fn as_dvec4(&self) -> crate::DVec4 {
        crate::DVec4::new(self.x as f64, self.y as f64, self.z as f64, self.w as f64)
    }

    /// Casts all elements of `self` to `i8`.
    #[inline]
    #[must_use]
    pub fn as_i8vec4(&self) -> crate::I8Vec4 {
        crate::I8Vec4::new(self.x as i8, self.y as i8, self.z as i8, self.w as i8)
    }

    /// Casts all elements of `self` to `u8`.
    #[inline]
    #[must_use]
    pub fn as_u8vec4(&self) -> crate::U8Vec4 {
        crate::U8Vec4::new(self.x as u8, self.y as u8, self.z as u8, self.w as u8)
    }

    /// Casts all elements of `self` to `i16`.
    #[inline]
    #[must_use]
    pub fn as_i16vec4(&self) -> crate::I16Vec4 {
        crate::I16Vec4::new(self.x as i16, self.y as i16, self.z as i16, self.w as i16)
    }

    /// Casts all elements of `self` to `u16`.
    #[inline]
    #[must_use]
    pub fn as_u16vec4(&self) -> crate::U16Vec4 {
        crate::U16Vec4::new(self.x as u16, self.y as u16, self.z as u16, self.w as u16)
    }

    /// Casts all elements of `self` to `i32`.
    #[inline]
    #[must_use]
    pub fn as_ivec4(&self) -> crate::IVec4 {
        crate::IVec4::new(self.x as i32, self.y as i32, self.z as i32, self.w as i32)
    }

    /// Casts all elements of `self` to `u32`.
    #[inline]
    #[must_use]
    pub fn as_uvec4(&self) -> crate::UVec4 {
        crate::UVec4::new(self.x as u32, self.y as u32, self.z as u32, self.w as u32)
    }

    /// Casts all elements of `self` to `i64`.
    #[inline]
    #[must_use]
    pub fn as_i64vec4(&self) -> crate::I64Vec4 {
        crate::I64Vec4::new(self.x as i64, self.y as i64, self.z as i64, self.w as i64)
    }

    /// Casts all elements of `self` to `u64`.
    #[inline]
    #[must_use]
    pub fn as_u64vec4(&self) -> crate::U64Vec4 {
        crate::U64Vec4::new(self.x as u64, self.y as u64, self.z as u64, self.w as u64)
    }
978}
979
980impl Default for Vec4 {
981    #[inline(always)]
982    fn default() -> Self {
983        Self::ZERO
984    }
985}
986
impl PartialEq for Vec4 {
    #[inline]
    fn eq(&self, rhs: &Self) -> bool {
        // Component-wise compare; equal only if all four lanes match. NOTE(review):
        // `cmpeq` presumably lowers to an IEEE-754 ordered compare, so a NaN lane
        // makes `eq` return false, matching scalar `f32` semantics — confirm
        // against the `cmpeq` definition.
        self.cmpeq(*rhs).all()
    }
}
993
// Component-wise division (`_mm_div_ps` divides lane by lane).
// SAFETY note for the intrinsic calls in this family: this file is the `sse2`
// backend, which the crate builds only when SSE2 is available.
impl Div<Vec4> for Vec4 {
    type Output = Self;
    #[inline]
    fn div(self, rhs: Self) -> Self {
        Self(unsafe { _mm_div_ps(self.0, rhs.0) })
    }
}

// Reference-operand forms: `Vec4` is `Copy`, so these just dereference and
// delegate to the by-value impl above.
impl Div<&Vec4> for Vec4 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: &Vec4) -> Vec4 {
        self.div(*rhs)
    }
}

impl Div<&Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: &Vec4) -> Vec4 {
        (*self).div(*rhs)
    }
}

impl Div<Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: Vec4) -> Vec4 {
        (*self).div(rhs)
    }
}

impl DivAssign<Vec4> for Vec4 {
    #[inline]
    fn div_assign(&mut self, rhs: Self) {
        self.0 = unsafe { _mm_div_ps(self.0, rhs.0) };
    }
}

impl DivAssign<&Self> for Vec4 {
    #[inline]
    fn div_assign(&mut self, rhs: &Self) {
        self.div_assign(*rhs)
    }
}

// Vector / scalar: the scalar is splatted across all four lanes first.
impl Div<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn div(self, rhs: f32) -> Self {
        Self(unsafe { _mm_div_ps(self.0, _mm_set1_ps(rhs)) })
    }
}

impl Div<&f32> for Vec4 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: &f32) -> Vec4 {
        self.div(*rhs)
    }
}

impl Div<&f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: &f32) -> Vec4 {
        (*self).div(*rhs)
    }
}

impl Div<f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: f32) -> Vec4 {
        (*self).div(rhs)
    }
}

impl DivAssign<f32> for Vec4 {
    #[inline]
    fn div_assign(&mut self, rhs: f32) {
        self.0 = unsafe { _mm_div_ps(self.0, _mm_set1_ps(rhs)) };
    }
}

impl DivAssign<&f32> for Vec4 {
    #[inline]
    fn div_assign(&mut self, rhs: &f32) {
        self.div_assign(*rhs)
    }
}

// Scalar / vector: the scalar becomes the left-hand operand of each lane.
impl Div<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: Vec4) -> Vec4 {
        Vec4(unsafe { _mm_div_ps(_mm_set1_ps(self), rhs.0) })
    }
}

impl Div<&Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: &Vec4) -> Vec4 {
        self.div(*rhs)
    }
}

impl Div<&Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: &Vec4) -> Vec4 {
        (*self).div(*rhs)
    }
}

impl Div<Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: Vec4) -> Vec4 {
        (*self).div(rhs)
    }
}
1117
// Component-wise multiplication (`_mm_mul_ps` multiplies lane by lane).
// SAFETY note for the intrinsic calls in this family: this file is the `sse2`
// backend, which the crate builds only when SSE2 is available.
impl Mul<Vec4> for Vec4 {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: Self) -> Self {
        Self(unsafe { _mm_mul_ps(self.0, rhs.0) })
    }
}

// Reference-operand forms: `Vec4` is `Copy`, so these just dereference and
// delegate to the by-value impl above.
impl Mul<&Vec4> for Vec4 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: &Vec4) -> Vec4 {
        self.mul(*rhs)
    }
}

impl Mul<&Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: &Vec4) -> Vec4 {
        (*self).mul(*rhs)
    }
}

impl Mul<Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: Vec4) -> Vec4 {
        (*self).mul(rhs)
    }
}

impl MulAssign<Vec4> for Vec4 {
    #[inline]
    fn mul_assign(&mut self, rhs: Self) {
        self.0 = unsafe { _mm_mul_ps(self.0, rhs.0) };
    }
}

impl MulAssign<&Self> for Vec4 {
    #[inline]
    fn mul_assign(&mut self, rhs: &Self) {
        self.mul_assign(*rhs)
    }
}

// Vector * scalar: the scalar is splatted across all four lanes first.
impl Mul<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: f32) -> Self {
        Self(unsafe { _mm_mul_ps(self.0, _mm_set1_ps(rhs)) })
    }
}

impl Mul<&f32> for Vec4 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: &f32) -> Vec4 {
        self.mul(*rhs)
    }
}

impl Mul<&f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: &f32) -> Vec4 {
        (*self).mul(*rhs)
    }
}

impl Mul<f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: f32) -> Vec4 {
        (*self).mul(rhs)
    }
}

impl MulAssign<f32> for Vec4 {
    #[inline]
    fn mul_assign(&mut self, rhs: f32) {
        self.0 = unsafe { _mm_mul_ps(self.0, _mm_set1_ps(rhs)) };
    }
}

impl MulAssign<&f32> for Vec4 {
    #[inline]
    fn mul_assign(&mut self, rhs: &f32) {
        self.mul_assign(*rhs)
    }
}

// Scalar * vector.
impl Mul<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: Vec4) -> Vec4 {
        Vec4(unsafe { _mm_mul_ps(_mm_set1_ps(self), rhs.0) })
    }
}

impl Mul<&Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: &Vec4) -> Vec4 {
        self.mul(*rhs)
    }
}

impl Mul<&Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: &Vec4) -> Vec4 {
        (*self).mul(*rhs)
    }
}

impl Mul<Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: Vec4) -> Vec4 {
        (*self).mul(rhs)
    }
}
1241
// Component-wise addition (`_mm_add_ps` adds lane by lane).
// SAFETY note for the intrinsic calls in this family: this file is the `sse2`
// backend, which the crate builds only when SSE2 is available.
impl Add<Vec4> for Vec4 {
    type Output = Self;
    #[inline]
    fn add(self, rhs: Self) -> Self {
        Self(unsafe { _mm_add_ps(self.0, rhs.0) })
    }
}

// Reference-operand forms: `Vec4` is `Copy`, so these just dereference and
// delegate to the by-value impl above.
impl Add<&Vec4> for Vec4 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: &Vec4) -> Vec4 {
        self.add(*rhs)
    }
}

impl Add<&Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: &Vec4) -> Vec4 {
        (*self).add(*rhs)
    }
}

impl Add<Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: Vec4) -> Vec4 {
        (*self).add(rhs)
    }
}

impl AddAssign<Vec4> for Vec4 {
    #[inline]
    fn add_assign(&mut self, rhs: Self) {
        self.0 = unsafe { _mm_add_ps(self.0, rhs.0) };
    }
}

impl AddAssign<&Self> for Vec4 {
    #[inline]
    fn add_assign(&mut self, rhs: &Self) {
        self.add_assign(*rhs)
    }
}

// Vector + scalar: the scalar is splatted across all four lanes first.
impl Add<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn add(self, rhs: f32) -> Self {
        Self(unsafe { _mm_add_ps(self.0, _mm_set1_ps(rhs)) })
    }
}

impl Add<&f32> for Vec4 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: &f32) -> Vec4 {
        self.add(*rhs)
    }
}

impl Add<&f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: &f32) -> Vec4 {
        (*self).add(*rhs)
    }
}

impl Add<f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: f32) -> Vec4 {
        (*self).add(rhs)
    }
}

impl AddAssign<f32> for Vec4 {
    #[inline]
    fn add_assign(&mut self, rhs: f32) {
        self.0 = unsafe { _mm_add_ps(self.0, _mm_set1_ps(rhs)) };
    }
}

impl AddAssign<&f32> for Vec4 {
    #[inline]
    fn add_assign(&mut self, rhs: &f32) {
        self.add_assign(*rhs)
    }
}

// Scalar + vector.
impl Add<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: Vec4) -> Vec4 {
        Vec4(unsafe { _mm_add_ps(_mm_set1_ps(self), rhs.0) })
    }
}

impl Add<&Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: &Vec4) -> Vec4 {
        self.add(*rhs)
    }
}

impl Add<&Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: &Vec4) -> Vec4 {
        (*self).add(*rhs)
    }
}

impl Add<Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: Vec4) -> Vec4 {
        (*self).add(rhs)
    }
}
1365
// Component-wise subtraction (`_mm_sub_ps` subtracts lane by lane).
// SAFETY note for the intrinsic calls in this family: this file is the `sse2`
// backend, which the crate builds only when SSE2 is available.
impl Sub<Vec4> for Vec4 {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: Self) -> Self {
        Self(unsafe { _mm_sub_ps(self.0, rhs.0) })
    }
}

// Reference-operand forms: `Vec4` is `Copy`, so these just dereference and
// delegate to the by-value impl above.
impl Sub<&Vec4> for Vec4 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: &Vec4) -> Vec4 {
        self.sub(*rhs)
    }
}

impl Sub<&Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: &Vec4) -> Vec4 {
        (*self).sub(*rhs)
    }
}

impl Sub<Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: Vec4) -> Vec4 {
        (*self).sub(rhs)
    }
}

impl SubAssign<Vec4> for Vec4 {
    #[inline]
    fn sub_assign(&mut self, rhs: Vec4) {
        self.0 = unsafe { _mm_sub_ps(self.0, rhs.0) };
    }
}

impl SubAssign<&Self> for Vec4 {
    #[inline]
    fn sub_assign(&mut self, rhs: &Self) {
        self.sub_assign(*rhs)
    }
}

// Vector - scalar: the scalar is splatted across all four lanes first.
impl Sub<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: f32) -> Self {
        Self(unsafe { _mm_sub_ps(self.0, _mm_set1_ps(rhs)) })
    }
}

impl Sub<&f32> for Vec4 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: &f32) -> Vec4 {
        self.sub(*rhs)
    }
}

impl Sub<&f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: &f32) -> Vec4 {
        (*self).sub(*rhs)
    }
}

impl Sub<f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: f32) -> Vec4 {
        (*self).sub(rhs)
    }
}

impl SubAssign<f32> for Vec4 {
    #[inline]
    fn sub_assign(&mut self, rhs: f32) {
        self.0 = unsafe { _mm_sub_ps(self.0, _mm_set1_ps(rhs)) };
    }
}

impl SubAssign<&f32> for Vec4 {
    #[inline]
    fn sub_assign(&mut self, rhs: &f32) {
        self.sub_assign(*rhs)
    }
}

// Scalar - vector: the scalar becomes the left-hand operand of each lane.
impl Sub<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: Vec4) -> Vec4 {
        Vec4(unsafe { _mm_sub_ps(_mm_set1_ps(self), rhs.0) })
    }
}

impl Sub<&Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: &Vec4) -> Vec4 {
        self.sub(*rhs)
    }
}

impl Sub<&Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: &Vec4) -> Vec4 {
        (*self).sub(*rhs)
    }
}

impl Sub<Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: Vec4) -> Vec4 {
        (*self).sub(rhs)
    }
}
1489
// Component-wise remainder computed as `self - floor(self / rhs) * rhs`
// (GLSL-style `mod`): the result takes the sign of `rhs`, unlike Rust's
// built-in `%` on `f32`, which truncates toward zero.
impl Rem<Vec4> for Vec4 {
    type Output = Self;
    #[inline]
    fn rem(self, rhs: Self) -> Self {
        // SAFETY: this file is the `sse2` backend, which the crate builds only
        // when SSE2 is available.
        unsafe {
            let n = m128_floor(_mm_div_ps(self.0, rhs.0));
            Self(_mm_sub_ps(self.0, _mm_mul_ps(n, rhs.0)))
        }
    }
}

// Reference-operand forms: `Vec4` is `Copy`, so these just dereference and
// delegate to the by-value impl above.
impl Rem<&Vec4> for Vec4 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: &Vec4) -> Vec4 {
        self.rem(*rhs)
    }
}

impl Rem<&Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: &Vec4) -> Vec4 {
        (*self).rem(*rhs)
    }
}

impl Rem<Vec4> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: Vec4) -> Vec4 {
        (*self).rem(rhs)
    }
}

impl RemAssign<Vec4> for Vec4 {
    #[inline]
    fn rem_assign(&mut self, rhs: Self) {
        *self = self.rem(rhs);
    }
}

impl RemAssign<&Self> for Vec4 {
    #[inline]
    fn rem_assign(&mut self, rhs: &Self) {
        self.rem_assign(*rhs)
    }
}

// Vector % scalar: the scalar is splatted across all four lanes first.
impl Rem<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn rem(self, rhs: f32) -> Self {
        self.rem(Self::splat(rhs))
    }
}

impl Rem<&f32> for Vec4 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: &f32) -> Vec4 {
        self.rem(*rhs)
    }
}

impl Rem<&f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: &f32) -> Vec4 {
        (*self).rem(*rhs)
    }
}

impl Rem<f32> for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: f32) -> Vec4 {
        (*self).rem(rhs)
    }
}

impl RemAssign<f32> for Vec4 {
    #[inline]
    fn rem_assign(&mut self, rhs: f32) {
        *self = self.rem(Self::splat(rhs));
    }
}

impl RemAssign<&f32> for Vec4 {
    #[inline]
    fn rem_assign(&mut self, rhs: &f32) {
        self.rem_assign(*rhs)
    }
}

// Scalar % vector: the scalar becomes the left-hand operand of each lane.
impl Rem<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: Vec4) -> Vec4 {
        Vec4::splat(self).rem(rhs)
    }
}

impl Rem<&Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: &Vec4) -> Vec4 {
        self.rem(*rhs)
    }
}

impl Rem<&Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: &Vec4) -> Vec4 {
        (*self).rem(*rhs)
    }
}

impl Rem<Vec4> for &f32 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: Vec4) -> Vec4 {
        (*self).rem(rhs)
    }
}
1616
#[cfg(not(target_arch = "spirv"))]
impl AsRef<[f32; 4]> for Vec4 {
    /// Borrows the vector's storage as a `[f32; 4]` array.
    #[inline]
    fn as_ref(&self) -> &[f32; 4] {
        // SAFETY: `Vec4` is `#[repr(transparent)]` over `__m128`, which is 16
        // bytes of four packed `f32` lanes with alignment >= that of `[f32; 4]`,
        // so the reinterpretation is valid.
        unsafe { &*(self as *const Vec4 as *const [f32; 4]) }
    }
}

#[cfg(not(target_arch = "spirv"))]
impl AsMut<[f32; 4]> for Vec4 {
    /// Mutably borrows the vector's storage as a `[f32; 4]` array.
    #[inline]
    fn as_mut(&mut self) -> &mut [f32; 4] {
        // SAFETY: same layout argument as `as_ref`; exclusivity is inherited
        // from the `&mut self` borrow.
        unsafe { &mut *(self as *mut Vec4 as *mut [f32; 4]) }
    }
}
1632
impl Sum for Vec4 {
    /// Sums the vectors component-wise; an empty iterator yields `Vec4::ZERO`.
    #[inline]
    fn sum<I>(iter: I) -> Self
    where
        I: Iterator<Item = Self>,
    {
        iter.fold(Self::ZERO, Self::add)
    }
}

impl<'a> Sum<&'a Self> for Vec4 {
    /// Sums borrowed vectors component-wise; an empty iterator yields `Vec4::ZERO`.
    #[inline]
    fn sum<I>(iter: I) -> Self
    where
        I: Iterator<Item = &'a Self>,
    {
        iter.fold(Self::ZERO, |a, &b| Self::add(a, b))
    }
}

impl Product for Vec4 {
    /// Multiplies the vectors component-wise; an empty iterator yields `Vec4::ONE`.
    #[inline]
    fn product<I>(iter: I) -> Self
    where
        I: Iterator<Item = Self>,
    {
        iter.fold(Self::ONE, Self::mul)
    }
}

impl<'a> Product<&'a Self> for Vec4 {
    /// Multiplies borrowed vectors component-wise; an empty iterator yields `Vec4::ONE`.
    #[inline]
    fn product<I>(iter: I) -> Self
    where
        I: Iterator<Item = &'a Self>,
    {
        iter.fold(Self::ONE, |a, &b| Self::mul(a, b))
    }
}
1672
impl Neg for Vec4 {
    type Output = Self;
    #[inline]
    fn neg(self) -> Self {
        // XOR with a splat of -0.0 flips only the sign bit of each lane,
        // negating every component (0.0 <-> -0.0 included).
        // SAFETY: this file is the `sse2` backend, which the crate builds only
        // when SSE2 is available.
        Self(unsafe { _mm_xor_ps(_mm_set1_ps(-0.0), self.0) })
    }
}

impl Neg for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn neg(self) -> Vec4 {
        (*self).neg()
    }
}
1688
impl Index<usize> for Vec4 {
    type Output = f32;
    /// Returns a reference to the component at `index` (0 = x .. 3 = w).
    ///
    /// Panics with "index out of bounds" for any `index > 3`.
    #[inline]
    fn index(&self, index: usize) -> &Self::Output {
        // The `x`/`y`/`z`/`w` fields are reached through this type's `Deref`
        // impl to `crate::deref::Vec4<f32>`.
        match index {
            0 => &self.x,
            1 => &self.y,
            2 => &self.z,
            3 => &self.w,
            _ => panic!("index out of bounds"),
        }
    }
}

impl IndexMut<usize> for Vec4 {
    /// Returns a mutable reference to the component at `index` (0 = x .. 3 = w).
    ///
    /// Panics with "index out of bounds" for any `index > 3`.
    #[inline]
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        match index {
            0 => &mut self.x,
            1 => &mut self.y,
            2 => &mut self.z,
            3 => &mut self.w,
            _ => panic!("index out of bounds"),
        }
    }
}
1715
impl fmt::Display for Vec4 {
    /// Formats as `[x, y, z, w]`, honoring a requested precision (`{:.N}`) if given.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        if let Some(p) = f.precision() {
            write!(
                f,
                "[{:.*}, {:.*}, {:.*}, {:.*}]",
                p, self.x, p, self.y, p, self.z, p, self.w
            )
        } else {
            write!(f, "[{}, {}, {}, {}]", self.x, self.y, self.z, self.w)
        }
    }
}

impl fmt::Debug for Vec4 {
    /// Formats as `Vec4(x, y, z, w)`; supports alternate (`{:#?}`) layout via `debug_tuple`.
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt.debug_tuple(stringify!(Vec4))
            .field(&self.x)
            .field(&self.y)
            .field(&self.z)
            .field(&self.w)
            .finish()
    }
}
1740
// Zero-cost conversions to and from the raw SIMD register type.
impl From<Vec4> for __m128 {
    #[inline(always)]
    fn from(t: Vec4) -> Self {
        t.0
    }
}

impl From<__m128> for Vec4 {
    #[inline(always)]
    fn from(t: __m128) -> Self {
        Self(t)
    }
}

impl From<[f32; 4]> for Vec4 {
    #[inline]
    fn from(a: [f32; 4]) -> Self {
        // SAFETY: `_mm_loadu_ps` performs an unaligned load of 16 bytes, all
        // of which are within the 4-element array.
        Self(unsafe { _mm_loadu_ps(a.as_ptr()) })
    }
}

impl From<Vec4> for [f32; 4] {
    #[inline]
    fn from(v: Vec4) -> Self {
        use crate::Align16;
        use core::mem::MaybeUninit;
        let mut out: MaybeUninit<Align16<Self>> = MaybeUninit::uninit();
        // SAFETY: `Align16` guarantees the 16-byte alignment `_mm_store_ps`
        // requires, and the store initializes all 16 bytes before
        // `assume_init` is called.
        unsafe {
            _mm_store_ps(out.as_mut_ptr().cast(), v.0);
            out.assume_init().0
        }
    }
}

impl From<(f32, f32, f32, f32)> for Vec4 {
    #[inline]
    fn from(t: (f32, f32, f32, f32)) -> Self {
        Self::new(t.0, t.1, t.2, t.3)
    }
}

impl From<Vec4> for (f32, f32, f32, f32) {
    #[inline]
    fn from(v: Vec4) -> Self {
        use crate::Align16;
        use core::mem::MaybeUninit;
        let mut out: MaybeUninit<Align16<Self>> = MaybeUninit::uninit();
        // SAFETY: alignment and initialization as in the `[f32; 4]` impl above.
        // NOTE(review): this additionally relies on `(f32, f32, f32, f32)`
        // having the same layout as `[f32; 4]`, which rustc does today but does
        // not formally guarantee for tuples — worth confirming upstream.
        unsafe {
            _mm_store_ps(out.as_mut_ptr().cast(), v.0);
            out.assume_init().0
        }
    }
}

// Widening constructions from lower-dimensional vectors and scalars.
impl From<(Vec3A, f32)> for Vec4 {
    #[inline]
    fn from((v, w): (Vec3A, f32)) -> Self {
        v.extend(w)
    }
}

impl From<(f32, Vec3A)> for Vec4 {
    #[inline]
    fn from((x, v): (f32, Vec3A)) -> Self {
        Self::new(x, v.x, v.y, v.z)
    }
}

impl From<(Vec3, f32)> for Vec4 {
    #[inline]
    fn from((v, w): (Vec3, f32)) -> Self {
        Self::new(v.x, v.y, v.z, w)
    }
}

impl From<(f32, Vec3)> for Vec4 {
    #[inline]
    fn from((x, v): (f32, Vec3)) -> Self {
        Self::new(x, v.x, v.y, v.z)
    }
}

impl From<(Vec2, f32, f32)> for Vec4 {
    #[inline]
    fn from((v, z, w): (Vec2, f32, f32)) -> Self {
        Self::new(v.x, v.y, z, w)
    }
}

impl From<(Vec2, Vec2)> for Vec4 {
    #[inline]
    fn from((v, u): (Vec2, Vec2)) -> Self {
        Self::new(v.x, v.y, u.x, u.y)
    }
}
1836
// Provides `.x`/`.y`/`.z`/`.w` field access on the opaque SIMD storage by
// reinterpreting it as the plain-struct view `crate::deref::Vec4<f32>`.
impl Deref for Vec4 {
    type Target = crate::deref::Vec4<f32>;
    #[inline]
    fn deref(&self) -> &Self::Target {
        // SAFETY: relies on `crate::deref::Vec4<f32>` having the same size,
        // alignment, and field order as four packed `f32` lanes — presumably
        // guaranteed where that type is defined; confirm against `crate::deref`.
        unsafe { &*(self as *const Self).cast() }
    }
}

impl DerefMut for Vec4 {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY: same layout argument as `deref`; exclusivity comes from
        // `&mut self`.
        unsafe { &mut *(self as *mut Self).cast() }
    }
}
1851
1852impl From<BVec4> for Vec4 {
1853    #[inline]
1854    fn from(v: BVec4) -> Self {
1855        Self::new(
1856            f32::from(v.x),
1857            f32::from(v.y),
1858            f32::from(v.z),
1859            f32::from(v.w),
1860        )
1861    }
1862}
1863
1864#[cfg(not(feature = "scalar-math"))]
1865
1866impl From<BVec4A> for Vec4 {
1867    #[inline]
1868    fn from(v: BVec4A) -> Self {
1869        let bool_array: [bool; 4] = v.into();
1870        Self::new(
1871            f32::from(bool_array[0]),
1872            f32::from(bool_array[1]),
1873            f32::from(bool_array[2]),
1874            f32::from(bool_array[3]),
1875        )
1876    }
1877}