1use crate::{f32::math, sse2::*, BVec3, BVec3A, Vec2, Vec3, Vec4};
4
5use core::fmt;
6use core::iter::{Product, Sum};
7use core::{f32, ops::*};
8
9#[cfg(target_arch = "x86")]
10use core::arch::x86::*;
11#[cfg(target_arch = "x86_64")]
12use core::arch::x86_64::*;
13
// Helper union for bit-casting a `[f32; 4]` into the SIMD register type in a
// `const` context, where intrinsics like `_mm_set_ps` are unavailable.
// `#[repr(C)]` guarantees both fields share the same 16-byte storage.
#[repr(C)]
union UnionCast {
    a: [f32; 4],
    v: Vec3A,
}
19
/// Creates a 3-dimensional vector.
///
/// Free-function shorthand for [`Vec3A::new`].
#[inline(always)]
#[must_use]
pub const fn vec3a(x: f32, y: f32, z: f32) -> Vec3A {
    Vec3A::new(x, y, z)
}
26
/// A 3-dimensional vector with SIMD support.
///
/// This type is 16 byte aligned and is backed by a single `__m128` register;
/// the fourth SIMD lane is unused padding. Field access (`.x`, `.y`, `.z`)
/// is provided via `Deref` to an XYZ view struct.
#[derive(Clone, Copy)]
#[repr(transparent)]
pub struct Vec3A(pub(crate) __m128);
39
40impl Vec3A {
41 pub const ZERO: Self = Self::splat(0.0);
43
44 pub const ONE: Self = Self::splat(1.0);
46
47 pub const NEG_ONE: Self = Self::splat(-1.0);
49
50 pub const MIN: Self = Self::splat(f32::MIN);
52
53 pub const MAX: Self = Self::splat(f32::MAX);
55
56 pub const NAN: Self = Self::splat(f32::NAN);
58
59 pub const INFINITY: Self = Self::splat(f32::INFINITY);
61
62 pub const NEG_INFINITY: Self = Self::splat(f32::NEG_INFINITY);
64
65 pub const X: Self = Self::new(1.0, 0.0, 0.0);
67
68 pub const Y: Self = Self::new(0.0, 1.0, 0.0);
70
71 pub const Z: Self = Self::new(0.0, 0.0, 1.0);
73
74 pub const NEG_X: Self = Self::new(-1.0, 0.0, 0.0);
76
77 pub const NEG_Y: Self = Self::new(0.0, -1.0, 0.0);
79
80 pub const NEG_Z: Self = Self::new(0.0, 0.0, -1.0);
82
83 pub const AXES: [Self; 3] = [Self::X, Self::Y, Self::Z];
85
86 #[inline(always)]
88 #[must_use]
89 pub const fn new(x: f32, y: f32, z: f32) -> Self {
90 unsafe { UnionCast { a: [x, y, z, z] }.v }
91 }
92
93 #[inline]
95 #[must_use]
96 pub const fn splat(v: f32) -> Self {
97 unsafe { UnionCast { a: [v; 4] }.v }
98 }
99
100 #[inline]
102 #[must_use]
103 pub fn map<F>(self, f: F) -> Self
104 where
105 F: Fn(f32) -> f32,
106 {
107 Self::new(f(self.x), f(self.y), f(self.z))
108 }
109
110 #[inline]
116 #[must_use]
117 pub fn select(mask: BVec3A, if_true: Self, if_false: Self) -> Self {
118 Self(unsafe {
119 _mm_or_ps(
120 _mm_andnot_ps(mask.0, if_false.0),
121 _mm_and_ps(if_true.0, mask.0),
122 )
123 })
124 }
125
126 #[inline]
128 #[must_use]
129 pub const fn from_array(a: [f32; 3]) -> Self {
130 Self::new(a[0], a[1], a[2])
131 }
132
133 #[inline]
135 #[must_use]
136 pub const fn to_array(&self) -> [f32; 3] {
137 unsafe { *(self as *const Vec3A as *const [f32; 3]) }
138 }
139
140 #[inline]
146 #[must_use]
147 pub const fn from_slice(slice: &[f32]) -> Self {
148 assert!(slice.len() >= 3);
149 Self::new(slice[0], slice[1], slice[2])
150 }
151
152 #[inline]
158 pub fn write_to_slice(self, slice: &mut [f32]) {
159 slice[..3].copy_from_slice(&self.to_array());
160 }
161
162 #[inline]
166 #[must_use]
167 pub fn from_vec4(v: Vec4) -> Self {
168 Self(v.0)
169 }
170
171 #[inline]
173 #[must_use]
174 pub fn extend(self, w: f32) -> Vec4 {
175 Vec4::new(self.x, self.y, self.z, w)
176 }
177
178 #[inline]
182 #[must_use]
183 pub fn truncate(self) -> Vec2 {
184 use crate::swizzles::Vec3Swizzles;
185 self.xy()
186 }
187
188 #[inline]
190 #[must_use]
191 pub fn with_x(mut self, x: f32) -> Self {
192 self.x = x;
193 self
194 }
195
196 #[inline]
198 #[must_use]
199 pub fn with_y(mut self, y: f32) -> Self {
200 self.y = y;
201 self
202 }
203
204 #[inline]
206 #[must_use]
207 pub fn with_z(mut self, z: f32) -> Self {
208 self.z = z;
209 self
210 }
211
212 #[inline]
214 #[must_use]
215 pub fn dot(self, rhs: Self) -> f32 {
216 unsafe { dot3(self.0, rhs.0) }
217 }
218
219 #[inline]
221 #[must_use]
222 pub fn dot_into_vec(self, rhs: Self) -> Self {
223 Self(unsafe { dot3_into_m128(self.0, rhs.0) })
224 }
225
226 #[inline]
228 #[must_use]
229 pub fn cross(self, rhs: Self) -> Self {
230 unsafe {
231 let lhszxy = _mm_shuffle_ps(self.0, self.0, 0b01_01_00_10);
237 let rhszxy = _mm_shuffle_ps(rhs.0, rhs.0, 0b01_01_00_10);
238 let lhszxy_rhs = _mm_mul_ps(lhszxy, rhs.0);
239 let rhszxy_lhs = _mm_mul_ps(rhszxy, self.0);
240 let sub = _mm_sub_ps(lhszxy_rhs, rhszxy_lhs);
241 Self(_mm_shuffle_ps(sub, sub, 0b01_01_00_10))
242 }
243 }
244
245 #[inline]
249 #[must_use]
250 pub fn min(self, rhs: Self) -> Self {
251 Self(unsafe { _mm_min_ps(self.0, rhs.0) })
252 }
253
254 #[inline]
258 #[must_use]
259 pub fn max(self, rhs: Self) -> Self {
260 Self(unsafe { _mm_max_ps(self.0, rhs.0) })
261 }
262
263 #[inline]
271 #[must_use]
272 pub fn clamp(self, min: Self, max: Self) -> Self {
273 glam_assert!(min.cmple(max).all(), "clamp: expected min <= max");
274 self.max(min).min(max)
275 }
276
277 #[inline]
281 #[must_use]
282 pub fn min_element(self) -> f32 {
283 unsafe {
284 let v = self.0;
285 let v = _mm_min_ps(v, _mm_shuffle_ps(v, v, 0b01_01_10_10));
286 let v = _mm_min_ps(v, _mm_shuffle_ps(v, v, 0b00_00_00_01));
287 _mm_cvtss_f32(v)
288 }
289 }
290
291 #[inline]
295 #[must_use]
296 pub fn max_element(self) -> f32 {
297 unsafe {
298 let v = self.0;
299 let v = _mm_max_ps(v, _mm_shuffle_ps(v, v, 0b00_00_10_10));
300 let v = _mm_max_ps(v, _mm_shuffle_ps(v, v, 0b00_00_00_01));
301 _mm_cvtss_f32(v)
302 }
303 }
304
305 #[inline]
309 #[must_use]
310 pub fn element_sum(self) -> f32 {
311 unsafe {
312 let v = self.0;
313 let v = _mm_add_ps(v, _mm_shuffle_ps(v, Self::ZERO.0, 0b00_11_00_01));
314 let v = _mm_add_ps(v, _mm_shuffle_ps(v, v, 0b00_00_00_10));
315 _mm_cvtss_f32(v)
316 }
317 }
318
319 #[inline]
323 #[must_use]
324 pub fn element_product(self) -> f32 {
325 unsafe {
326 let v = self.0;
327 let v = _mm_mul_ps(v, _mm_shuffle_ps(v, Self::ONE.0, 0b00_11_00_01));
328 let v = _mm_mul_ps(v, _mm_shuffle_ps(v, v, 0b00_00_00_10));
329 _mm_cvtss_f32(v)
330 }
331 }
332
333 #[inline]
339 #[must_use]
340 pub fn cmpeq(self, rhs: Self) -> BVec3A {
341 BVec3A(unsafe { _mm_cmpeq_ps(self.0, rhs.0) })
342 }
343
344 #[inline]
350 #[must_use]
351 pub fn cmpne(self, rhs: Self) -> BVec3A {
352 BVec3A(unsafe { _mm_cmpneq_ps(self.0, rhs.0) })
353 }
354
355 #[inline]
361 #[must_use]
362 pub fn cmpge(self, rhs: Self) -> BVec3A {
363 BVec3A(unsafe { _mm_cmpge_ps(self.0, rhs.0) })
364 }
365
366 #[inline]
372 #[must_use]
373 pub fn cmpgt(self, rhs: Self) -> BVec3A {
374 BVec3A(unsafe { _mm_cmpgt_ps(self.0, rhs.0) })
375 }
376
377 #[inline]
383 #[must_use]
384 pub fn cmple(self, rhs: Self) -> BVec3A {
385 BVec3A(unsafe { _mm_cmple_ps(self.0, rhs.0) })
386 }
387
388 #[inline]
394 #[must_use]
395 pub fn cmplt(self, rhs: Self) -> BVec3A {
396 BVec3A(unsafe { _mm_cmplt_ps(self.0, rhs.0) })
397 }
398
399 #[inline]
401 #[must_use]
402 pub fn abs(self) -> Self {
403 Self(unsafe { crate::sse2::m128_abs(self.0) })
404 }
405
406 #[inline]
412 #[must_use]
413 pub fn signum(self) -> Self {
414 let result = Self(unsafe { _mm_or_ps(_mm_and_ps(self.0, Self::NEG_ONE.0), Self::ONE.0) });
415 let mask = self.is_nan_mask();
416 Self::select(mask, self, result)
417 }
418
419 #[inline]
421 #[must_use]
422 pub fn copysign(self, rhs: Self) -> Self {
423 let mask = Self::splat(-0.0);
424 Self(unsafe { _mm_or_ps(_mm_and_ps(rhs.0, mask.0), _mm_andnot_ps(mask.0, self.0)) })
425 }
426
427 #[inline]
432 #[must_use]
433 pub fn is_negative_bitmask(self) -> u32 {
434 unsafe { (_mm_movemask_ps(self.0) as u32) & 0x7 }
435 }
436
437 #[inline]
440 #[must_use]
441 pub fn is_finite(self) -> bool {
442 self.is_finite_mask().all()
443 }
444
445 pub fn is_finite_mask(self) -> BVec3A {
449 BVec3A(unsafe { _mm_cmplt_ps(crate::sse2::m128_abs(self.0), Self::INFINITY.0) })
450 }
451
452 #[inline]
454 #[must_use]
455 pub fn is_nan(self) -> bool {
456 self.is_nan_mask().any()
457 }
458
459 #[inline]
463 #[must_use]
464 pub fn is_nan_mask(self) -> BVec3A {
465 BVec3A(unsafe { _mm_cmpunord_ps(self.0, self.0) })
466 }
467
468 #[doc(alias = "magnitude")]
470 #[inline]
471 #[must_use]
472 pub fn length(self) -> f32 {
473 unsafe {
474 let dot = dot3_in_x(self.0, self.0);
475 _mm_cvtss_f32(_mm_sqrt_ps(dot))
476 }
477 }
478
479 #[doc(alias = "magnitude2")]
483 #[inline]
484 #[must_use]
485 pub fn length_squared(self) -> f32 {
486 self.dot(self)
487 }
488
489 #[inline]
493 #[must_use]
494 pub fn length_recip(self) -> f32 {
495 unsafe {
496 let dot = dot3_in_x(self.0, self.0);
497 _mm_cvtss_f32(_mm_div_ps(Self::ONE.0, _mm_sqrt_ps(dot)))
498 }
499 }
500
501 #[inline]
503 #[must_use]
504 pub fn distance(self, rhs: Self) -> f32 {
505 (self - rhs).length()
506 }
507
508 #[inline]
510 #[must_use]
511 pub fn distance_squared(self, rhs: Self) -> f32 {
512 (self - rhs).length_squared()
513 }
514
515 #[inline]
517 #[must_use]
518 pub fn div_euclid(self, rhs: Self) -> Self {
519 Self::new(
520 math::div_euclid(self.x, rhs.x),
521 math::div_euclid(self.y, rhs.y),
522 math::div_euclid(self.z, rhs.z),
523 )
524 }
525
526 #[inline]
530 #[must_use]
531 pub fn rem_euclid(self, rhs: Self) -> Self {
532 Self::new(
533 math::rem_euclid(self.x, rhs.x),
534 math::rem_euclid(self.y, rhs.y),
535 math::rem_euclid(self.z, rhs.z),
536 )
537 }
538
539 #[inline]
549 #[must_use]
550 pub fn normalize(self) -> Self {
551 unsafe {
552 let length = _mm_sqrt_ps(dot3_into_m128(self.0, self.0));
553 #[allow(clippy::let_and_return)]
554 let normalized = Self(_mm_div_ps(self.0, length));
555 glam_assert!(normalized.is_finite());
556 normalized
557 }
558 }
559
560 #[inline]
567 #[must_use]
568 pub fn try_normalize(self) -> Option<Self> {
569 let rcp = self.length_recip();
570 if rcp.is_finite() && rcp > 0.0 {
571 Some(self * rcp)
572 } else {
573 None
574 }
575 }
576
577 #[inline]
585 #[must_use]
586 pub fn normalize_or(self, fallback: Self) -> Self {
587 let rcp = self.length_recip();
588 if rcp.is_finite() && rcp > 0.0 {
589 self * rcp
590 } else {
591 fallback
592 }
593 }
594
595 #[inline]
602 #[must_use]
603 pub fn normalize_or_zero(self) -> Self {
604 self.normalize_or(Self::ZERO)
605 }
606
607 #[inline]
611 #[must_use]
612 pub fn is_normalized(self) -> bool {
613 math::abs(self.length_squared() - 1.0) <= 2e-4
614 }
615
616 #[inline]
624 #[must_use]
625 pub fn project_onto(self, rhs: Self) -> Self {
626 let other_len_sq_rcp = rhs.dot(rhs).recip();
627 glam_assert!(other_len_sq_rcp.is_finite());
628 rhs * self.dot(rhs) * other_len_sq_rcp
629 }
630
631 #[doc(alias("plane"))]
642 #[inline]
643 #[must_use]
644 pub fn reject_from(self, rhs: Self) -> Self {
645 self - self.project_onto(rhs)
646 }
647
648 #[inline]
656 #[must_use]
657 pub fn project_onto_normalized(self, rhs: Self) -> Self {
658 glam_assert!(rhs.is_normalized());
659 rhs * self.dot(rhs)
660 }
661
662 #[doc(alias("plane"))]
673 #[inline]
674 #[must_use]
675 pub fn reject_from_normalized(self, rhs: Self) -> Self {
676 self - self.project_onto_normalized(rhs)
677 }
678
679 #[inline]
682 #[must_use]
683 pub fn round(self) -> Self {
684 Self(unsafe { m128_round(self.0) })
685 }
686
687 #[inline]
690 #[must_use]
691 pub fn floor(self) -> Self {
692 Self(unsafe { m128_floor(self.0) })
693 }
694
695 #[inline]
698 #[must_use]
699 pub fn ceil(self) -> Self {
700 Self(unsafe { m128_ceil(self.0) })
701 }
702
703 #[inline]
706 #[must_use]
707 pub fn trunc(self) -> Self {
708 Self(unsafe { m128_trunc(self.0) })
709 }
710
711 #[inline]
718 #[must_use]
719 pub fn fract(self) -> Self {
720 self - self.trunc()
721 }
722
723 #[inline]
730 #[must_use]
731 pub fn fract_gl(self) -> Self {
732 self - self.floor()
733 }
734
735 #[inline]
738 #[must_use]
739 pub fn exp(self) -> Self {
740 Self::new(math::exp(self.x), math::exp(self.y), math::exp(self.z))
741 }
742
743 #[inline]
745 #[must_use]
746 pub fn powf(self, n: f32) -> Self {
747 Self::new(
748 math::powf(self.x, n),
749 math::powf(self.y, n),
750 math::powf(self.z, n),
751 )
752 }
753
754 #[inline]
756 #[must_use]
757 pub fn recip(self) -> Self {
758 Self(unsafe { _mm_div_ps(Self::ONE.0, self.0) })
759 }
760
761 #[doc(alias = "mix")]
767 #[inline]
768 #[must_use]
769 pub fn lerp(self, rhs: Self, s: f32) -> Self {
770 self * (1.0 - s) + rhs * s
771 }
772
773 #[inline]
778 #[must_use]
779 pub fn move_towards(&self, rhs: Self, d: f32) -> Self {
780 let a = rhs - *self;
781 let len = a.length();
782 if len <= d || len <= 1e-4 {
783 return rhs;
784 }
785 *self + a / len * d
786 }
787
788 #[inline]
794 pub fn midpoint(self, rhs: Self) -> Self {
795 (self + rhs) * 0.5
796 }
797
798 #[inline]
808 #[must_use]
809 pub fn abs_diff_eq(self, rhs: Self, max_abs_diff: f32) -> bool {
810 self.sub(rhs).abs().cmple(Self::splat(max_abs_diff)).all()
811 }
812
813 #[inline]
819 #[must_use]
820 pub fn clamp_length(self, min: f32, max: f32) -> Self {
821 glam_assert!(0.0 <= min);
822 glam_assert!(min <= max);
823 let length_sq = self.length_squared();
824 if length_sq < min * min {
825 min * (self / math::sqrt(length_sq))
826 } else if length_sq > max * max {
827 max * (self / math::sqrt(length_sq))
828 } else {
829 self
830 }
831 }
832
833 #[inline]
839 #[must_use]
840 pub fn clamp_length_max(self, max: f32) -> Self {
841 glam_assert!(0.0 <= max);
842 let length_sq = self.length_squared();
843 if length_sq > max * max {
844 max * (self / math::sqrt(length_sq))
845 } else {
846 self
847 }
848 }
849
850 #[inline]
856 #[must_use]
857 pub fn clamp_length_min(self, min: f32) -> Self {
858 glam_assert!(0.0 <= min);
859 let length_sq = self.length_squared();
860 if length_sq < min * min {
861 min * (self / math::sqrt(length_sq))
862 } else {
863 self
864 }
865 }
866
867 #[inline]
875 #[must_use]
876 pub fn mul_add(self, a: Self, b: Self) -> Self {
877 #[cfg(target_feature = "fma")]
878 unsafe {
879 Self(_mm_fmadd_ps(self.0, a.0, b.0))
880 }
881 #[cfg(not(target_feature = "fma"))]
882 Self::new(
883 math::mul_add(self.x, a.x, b.x),
884 math::mul_add(self.y, a.y, b.y),
885 math::mul_add(self.z, a.z, b.z),
886 )
887 }
888
889 #[inline]
898 #[must_use]
899 pub fn reflect(self, normal: Self) -> Self {
900 glam_assert!(normal.is_normalized());
901 self - 2.0 * self.dot(normal) * normal
902 }
903
904 #[inline]
914 #[must_use]
915 pub fn refract(self, normal: Self, eta: f32) -> Self {
916 glam_assert!(self.is_normalized());
917 glam_assert!(normal.is_normalized());
918 let n_dot_i = normal.dot(self);
919 let k = 1.0 - eta * eta * (1.0 - n_dot_i * n_dot_i);
920 if k >= 0.0 {
921 eta * self - (eta * n_dot_i + math::sqrt(k)) * normal
922 } else {
923 Self::ZERO
924 }
925 }
926
927 #[inline]
931 #[must_use]
932 pub fn angle_between(self, rhs: Self) -> f32 {
933 math::acos_approx(
934 self.dot(rhs)
935 .div(math::sqrt(self.length_squared().mul(rhs.length_squared()))),
936 )
937 }
938
939 #[inline]
946 #[must_use]
947 pub fn any_orthogonal_vector(&self) -> Self {
948 if math::abs(self.x) > math::abs(self.y) {
950 Self::new(-self.z, 0.0, self.x) } else {
952 Self::new(0.0, self.z, -self.y) }
954 }
955
956 #[inline]
964 #[must_use]
965 pub fn any_orthonormal_vector(&self) -> Self {
966 glam_assert!(self.is_normalized());
967 let sign = math::signum(self.z);
969 let a = -1.0 / (sign + self.z);
970 let b = self.x * self.y * a;
971 Self::new(b, sign + self.y * self.y * a, -self.y)
972 }
973
974 #[inline]
981 #[must_use]
982 pub fn any_orthonormal_pair(&self) -> (Self, Self) {
983 glam_assert!(self.is_normalized());
984 let sign = math::signum(self.z);
986 let a = -1.0 / (sign + self.z);
987 let b = self.x * self.y * a;
988 (
989 Self::new(1.0 + sign * self.x * self.x * a, sign * b, -sign * self.x),
990 Self::new(b, sign + self.y * self.y * a, -self.y),
991 )
992 }
993
994 #[inline]
996 #[must_use]
997 pub fn as_dvec3(&self) -> crate::DVec3 {
998 crate::DVec3::new(self.x as f64, self.y as f64, self.z as f64)
999 }
1000
1001 #[inline]
1003 #[must_use]
1004 pub fn as_i8vec3(&self) -> crate::I8Vec3 {
1005 crate::I8Vec3::new(self.x as i8, self.y as i8, self.z as i8)
1006 }
1007
1008 #[inline]
1010 #[must_use]
1011 pub fn as_u8vec3(&self) -> crate::U8Vec3 {
1012 crate::U8Vec3::new(self.x as u8, self.y as u8, self.z as u8)
1013 }
1014
1015 #[inline]
1017 #[must_use]
1018 pub fn as_i16vec3(&self) -> crate::I16Vec3 {
1019 crate::I16Vec3::new(self.x as i16, self.y as i16, self.z as i16)
1020 }
1021
1022 #[inline]
1024 #[must_use]
1025 pub fn as_u16vec3(&self) -> crate::U16Vec3 {
1026 crate::U16Vec3::new(self.x as u16, self.y as u16, self.z as u16)
1027 }
1028
1029 #[inline]
1031 #[must_use]
1032 pub fn as_ivec3(&self) -> crate::IVec3 {
1033 crate::IVec3::new(self.x as i32, self.y as i32, self.z as i32)
1034 }
1035
1036 #[inline]
1038 #[must_use]
1039 pub fn as_uvec3(&self) -> crate::UVec3 {
1040 crate::UVec3::new(self.x as u32, self.y as u32, self.z as u32)
1041 }
1042
1043 #[inline]
1045 #[must_use]
1046 pub fn as_i64vec3(&self) -> crate::I64Vec3 {
1047 crate::I64Vec3::new(self.x as i64, self.y as i64, self.z as i64)
1048 }
1049
1050 #[inline]
1052 #[must_use]
1053 pub fn as_u64vec3(&self) -> crate::U64Vec3 {
1054 crate::U64Vec3::new(self.x as u64, self.y as u64, self.z as u64)
1055 }
1056}
1057
// The default value is the zero vector.
impl Default for Vec3A {
    #[inline(always)]
    fn default() -> Self {
        Self::ZERO
    }
}

// Equality is element-wise over x/y/z only; the padding lane is ignored by
// `cmpeq`/`all`. Note this follows IEEE semantics: NaN != NaN.
impl PartialEq for Vec3A {
    #[inline]
    fn eq(&self, rhs: &Self) -> bool {
        self.cmpeq(*rhs).all()
    }
}
1071
// Element-wise division. The vec/vec and vec/scalar forms are the primitive
// SIMD implementations; the `&`-reference variants forward to them so that
// all combinations of owned/borrowed operands work.
impl Div<Vec3A> for Vec3A {
    type Output = Self;
    #[inline]
    fn div(self, rhs: Self) -> Self {
        Self(unsafe { _mm_div_ps(self.0, rhs.0) })
    }
}

impl Div<&Vec3A> for Vec3A {
    type Output = Vec3A;
    #[inline]
    fn div(self, rhs: &Vec3A) -> Vec3A {
        self.div(*rhs)
    }
}

impl Div<&Vec3A> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn div(self, rhs: &Vec3A) -> Vec3A {
        (*self).div(*rhs)
    }
}

impl Div<Vec3A> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn div(self, rhs: Vec3A) -> Vec3A {
        (*self).div(rhs)
    }
}

impl DivAssign<Vec3A> for Vec3A {
    #[inline]
    fn div_assign(&mut self, rhs: Self) {
        self.0 = unsafe { _mm_div_ps(self.0, rhs.0) };
    }
}

impl DivAssign<&Self> for Vec3A {
    #[inline]
    fn div_assign(&mut self, rhs: &Self) {
        self.div_assign(*rhs)
    }
}

// Vector / scalar: the scalar is broadcast to all lanes.
impl Div<f32> for Vec3A {
    type Output = Self;
    #[inline]
    fn div(self, rhs: f32) -> Self {
        Self(unsafe { _mm_div_ps(self.0, _mm_set1_ps(rhs)) })
    }
}

impl Div<&f32> for Vec3A {
    type Output = Vec3A;
    #[inline]
    fn div(self, rhs: &f32) -> Vec3A {
        self.div(*rhs)
    }
}

impl Div<&f32> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn div(self, rhs: &f32) -> Vec3A {
        (*self).div(*rhs)
    }
}

impl Div<f32> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn div(self, rhs: f32) -> Vec3A {
        (*self).div(rhs)
    }
}

impl DivAssign<f32> for Vec3A {
    #[inline]
    fn div_assign(&mut self, rhs: f32) {
        self.0 = unsafe { _mm_div_ps(self.0, _mm_set1_ps(rhs)) };
    }
}

impl DivAssign<&f32> for Vec3A {
    #[inline]
    fn div_assign(&mut self, rhs: &f32) {
        self.div_assign(*rhs)
    }
}

// Scalar / vector: the scalar is broadcast and divided element-wise.
impl Div<Vec3A> for f32 {
    type Output = Vec3A;
    #[inline]
    fn div(self, rhs: Vec3A) -> Vec3A {
        Vec3A(unsafe { _mm_div_ps(_mm_set1_ps(self), rhs.0) })
    }
}

impl Div<&Vec3A> for f32 {
    type Output = Vec3A;
    #[inline]
    fn div(self, rhs: &Vec3A) -> Vec3A {
        self.div(*rhs)
    }
}

impl Div<&Vec3A> for &f32 {
    type Output = Vec3A;
    #[inline]
    fn div(self, rhs: &Vec3A) -> Vec3A {
        (*self).div(*rhs)
    }
}

impl Div<Vec3A> for &f32 {
    type Output = Vec3A;
    #[inline]
    fn div(self, rhs: Vec3A) -> Vec3A {
        (*self).div(rhs)
    }
}
1195
// Element-wise multiplication; same forwarding layout as the `Div` impls.
impl Mul<Vec3A> for Vec3A {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: Self) -> Self {
        Self(unsafe { _mm_mul_ps(self.0, rhs.0) })
    }
}

impl Mul<&Vec3A> for Vec3A {
    type Output = Vec3A;
    #[inline]
    fn mul(self, rhs: &Vec3A) -> Vec3A {
        self.mul(*rhs)
    }
}

impl Mul<&Vec3A> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn mul(self, rhs: &Vec3A) -> Vec3A {
        (*self).mul(*rhs)
    }
}

impl Mul<Vec3A> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn mul(self, rhs: Vec3A) -> Vec3A {
        (*self).mul(rhs)
    }
}

impl MulAssign<Vec3A> for Vec3A {
    #[inline]
    fn mul_assign(&mut self, rhs: Self) {
        self.0 = unsafe { _mm_mul_ps(self.0, rhs.0) };
    }
}

impl MulAssign<&Self> for Vec3A {
    #[inline]
    fn mul_assign(&mut self, rhs: &Self) {
        self.mul_assign(*rhs)
    }
}

// Vector * scalar: the scalar is broadcast to all lanes.
impl Mul<f32> for Vec3A {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: f32) -> Self {
        Self(unsafe { _mm_mul_ps(self.0, _mm_set1_ps(rhs)) })
    }
}

impl Mul<&f32> for Vec3A {
    type Output = Vec3A;
    #[inline]
    fn mul(self, rhs: &f32) -> Vec3A {
        self.mul(*rhs)
    }
}

impl Mul<&f32> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn mul(self, rhs: &f32) -> Vec3A {
        (*self).mul(*rhs)
    }
}

impl Mul<f32> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn mul(self, rhs: f32) -> Vec3A {
        (*self).mul(rhs)
    }
}

impl MulAssign<f32> for Vec3A {
    #[inline]
    fn mul_assign(&mut self, rhs: f32) {
        self.0 = unsafe { _mm_mul_ps(self.0, _mm_set1_ps(rhs)) };
    }
}

impl MulAssign<&f32> for Vec3A {
    #[inline]
    fn mul_assign(&mut self, rhs: &f32) {
        self.mul_assign(*rhs)
    }
}

// Scalar * vector, enabling expressions like `2.0 * v`.
impl Mul<Vec3A> for f32 {
    type Output = Vec3A;
    #[inline]
    fn mul(self, rhs: Vec3A) -> Vec3A {
        Vec3A(unsafe { _mm_mul_ps(_mm_set1_ps(self), rhs.0) })
    }
}

impl Mul<&Vec3A> for f32 {
    type Output = Vec3A;
    #[inline]
    fn mul(self, rhs: &Vec3A) -> Vec3A {
        self.mul(*rhs)
    }
}

impl Mul<&Vec3A> for &f32 {
    type Output = Vec3A;
    #[inline]
    fn mul(self, rhs: &Vec3A) -> Vec3A {
        (*self).mul(*rhs)
    }
}

impl Mul<Vec3A> for &f32 {
    type Output = Vec3A;
    #[inline]
    fn mul(self, rhs: Vec3A) -> Vec3A {
        (*self).mul(rhs)
    }
}
1319
// Element-wise addition; same forwarding layout as the `Div` impls.
impl Add<Vec3A> for Vec3A {
    type Output = Self;
    #[inline]
    fn add(self, rhs: Self) -> Self {
        Self(unsafe { _mm_add_ps(self.0, rhs.0) })
    }
}

impl Add<&Vec3A> for Vec3A {
    type Output = Vec3A;
    #[inline]
    fn add(self, rhs: &Vec3A) -> Vec3A {
        self.add(*rhs)
    }
}

impl Add<&Vec3A> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn add(self, rhs: &Vec3A) -> Vec3A {
        (*self).add(*rhs)
    }
}

impl Add<Vec3A> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn add(self, rhs: Vec3A) -> Vec3A {
        (*self).add(rhs)
    }
}

impl AddAssign<Vec3A> for Vec3A {
    #[inline]
    fn add_assign(&mut self, rhs: Self) {
        self.0 = unsafe { _mm_add_ps(self.0, rhs.0) };
    }
}

impl AddAssign<&Self> for Vec3A {
    #[inline]
    fn add_assign(&mut self, rhs: &Self) {
        self.add_assign(*rhs)
    }
}

// Vector + scalar: the scalar is broadcast and added to every element.
impl Add<f32> for Vec3A {
    type Output = Self;
    #[inline]
    fn add(self, rhs: f32) -> Self {
        Self(unsafe { _mm_add_ps(self.0, _mm_set1_ps(rhs)) })
    }
}

impl Add<&f32> for Vec3A {
    type Output = Vec3A;
    #[inline]
    fn add(self, rhs: &f32) -> Vec3A {
        self.add(*rhs)
    }
}

impl Add<&f32> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn add(self, rhs: &f32) -> Vec3A {
        (*self).add(*rhs)
    }
}

impl Add<f32> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn add(self, rhs: f32) -> Vec3A {
        (*self).add(rhs)
    }
}

impl AddAssign<f32> for Vec3A {
    #[inline]
    fn add_assign(&mut self, rhs: f32) {
        self.0 = unsafe { _mm_add_ps(self.0, _mm_set1_ps(rhs)) };
    }
}

impl AddAssign<&f32> for Vec3A {
    #[inline]
    fn add_assign(&mut self, rhs: &f32) {
        self.add_assign(*rhs)
    }
}

// Scalar + vector, enabling expressions like `1.0 + v`.
impl Add<Vec3A> for f32 {
    type Output = Vec3A;
    #[inline]
    fn add(self, rhs: Vec3A) -> Vec3A {
        Vec3A(unsafe { _mm_add_ps(_mm_set1_ps(self), rhs.0) })
    }
}

impl Add<&Vec3A> for f32 {
    type Output = Vec3A;
    #[inline]
    fn add(self, rhs: &Vec3A) -> Vec3A {
        self.add(*rhs)
    }
}

impl Add<&Vec3A> for &f32 {
    type Output = Vec3A;
    #[inline]
    fn add(self, rhs: &Vec3A) -> Vec3A {
        (*self).add(*rhs)
    }
}

impl Add<Vec3A> for &f32 {
    type Output = Vec3A;
    #[inline]
    fn add(self, rhs: Vec3A) -> Vec3A {
        (*self).add(rhs)
    }
}
1443
// Element-wise subtraction; same forwarding layout as the `Div` impls.
impl Sub<Vec3A> for Vec3A {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: Self) -> Self {
        Self(unsafe { _mm_sub_ps(self.0, rhs.0) })
    }
}

impl Sub<&Vec3A> for Vec3A {
    type Output = Vec3A;
    #[inline]
    fn sub(self, rhs: &Vec3A) -> Vec3A {
        self.sub(*rhs)
    }
}

impl Sub<&Vec3A> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn sub(self, rhs: &Vec3A) -> Vec3A {
        (*self).sub(*rhs)
    }
}

impl Sub<Vec3A> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn sub(self, rhs: Vec3A) -> Vec3A {
        (*self).sub(rhs)
    }
}

impl SubAssign<Vec3A> for Vec3A {
    #[inline]
    fn sub_assign(&mut self, rhs: Vec3A) {
        self.0 = unsafe { _mm_sub_ps(self.0, rhs.0) };
    }
}

impl SubAssign<&Self> for Vec3A {
    #[inline]
    fn sub_assign(&mut self, rhs: &Self) {
        self.sub_assign(*rhs)
    }
}

// Vector - scalar: the scalar is broadcast and subtracted from every element.
impl Sub<f32> for Vec3A {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: f32) -> Self {
        Self(unsafe { _mm_sub_ps(self.0, _mm_set1_ps(rhs)) })
    }
}

impl Sub<&f32> for Vec3A {
    type Output = Vec3A;
    #[inline]
    fn sub(self, rhs: &f32) -> Vec3A {
        self.sub(*rhs)
    }
}

impl Sub<&f32> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn sub(self, rhs: &f32) -> Vec3A {
        (*self).sub(*rhs)
    }
}

impl Sub<f32> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn sub(self, rhs: f32) -> Vec3A {
        (*self).sub(rhs)
    }
}

impl SubAssign<f32> for Vec3A {
    #[inline]
    fn sub_assign(&mut self, rhs: f32) {
        self.0 = unsafe { _mm_sub_ps(self.0, _mm_set1_ps(rhs)) };
    }
}

impl SubAssign<&f32> for Vec3A {
    #[inline]
    fn sub_assign(&mut self, rhs: &f32) {
        self.sub_assign(*rhs)
    }
}

// Scalar - vector: the scalar is broadcast, then subtracted element-wise.
impl Sub<Vec3A> for f32 {
    type Output = Vec3A;
    #[inline]
    fn sub(self, rhs: Vec3A) -> Vec3A {
        Vec3A(unsafe { _mm_sub_ps(_mm_set1_ps(self), rhs.0) })
    }
}

impl Sub<&Vec3A> for f32 {
    type Output = Vec3A;
    #[inline]
    fn sub(self, rhs: &Vec3A) -> Vec3A {
        self.sub(*rhs)
    }
}

impl Sub<&Vec3A> for &f32 {
    type Output = Vec3A;
    #[inline]
    fn sub(self, rhs: &Vec3A) -> Vec3A {
        (*self).sub(*rhs)
    }
}

impl Sub<Vec3A> for &f32 {
    type Output = Vec3A;
    #[inline]
    fn sub(self, rhs: Vec3A) -> Vec3A {
        (*self).sub(rhs)
    }
}
1567
// Element-wise remainder computed as `self - floor(self / rhs) * rhs`.
// NOTE(review): because `m128_floor` (not trunc) is used here, this vectorized
// remainder follows the sign of `rhs` rather than the sign of `self` — unlike
// scalar Rust `%`. This matches the other SIMD vector backends in the crate.
impl Rem<Vec3A> for Vec3A {
    type Output = Self;
    #[inline]
    fn rem(self, rhs: Self) -> Self {
        unsafe {
            let n = m128_floor(_mm_div_ps(self.0, rhs.0));
            Self(_mm_sub_ps(self.0, _mm_mul_ps(n, rhs.0)))
        }
    }
}

impl Rem<&Vec3A> for Vec3A {
    type Output = Vec3A;
    #[inline]
    fn rem(self, rhs: &Vec3A) -> Vec3A {
        self.rem(*rhs)
    }
}

impl Rem<&Vec3A> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn rem(self, rhs: &Vec3A) -> Vec3A {
        (*self).rem(*rhs)
    }
}

impl Rem<Vec3A> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn rem(self, rhs: Vec3A) -> Vec3A {
        (*self).rem(rhs)
    }
}

impl RemAssign<Vec3A> for Vec3A {
    #[inline]
    fn rem_assign(&mut self, rhs: Self) {
        *self = self.rem(rhs);
    }
}

impl RemAssign<&Self> for Vec3A {
    #[inline]
    fn rem_assign(&mut self, rhs: &Self) {
        self.rem_assign(*rhs)
    }
}

// Vector % scalar: the scalar divisor is broadcast to all lanes.
impl Rem<f32> for Vec3A {
    type Output = Self;
    #[inline]
    fn rem(self, rhs: f32) -> Self {
        self.rem(Self::splat(rhs))
    }
}

impl Rem<&f32> for Vec3A {
    type Output = Vec3A;
    #[inline]
    fn rem(self, rhs: &f32) -> Vec3A {
        self.rem(*rhs)
    }
}

impl Rem<&f32> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn rem(self, rhs: &f32) -> Vec3A {
        (*self).rem(*rhs)
    }
}

impl Rem<f32> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn rem(self, rhs: f32) -> Vec3A {
        (*self).rem(rhs)
    }
}

impl RemAssign<f32> for Vec3A {
    #[inline]
    fn rem_assign(&mut self, rhs: f32) {
        *self = self.rem(Self::splat(rhs));
    }
}

impl RemAssign<&f32> for Vec3A {
    #[inline]
    fn rem_assign(&mut self, rhs: &f32) {
        self.rem_assign(*rhs)
    }
}

// Scalar % vector: the scalar dividend is broadcast to all lanes.
impl Rem<Vec3A> for f32 {
    type Output = Vec3A;
    #[inline]
    fn rem(self, rhs: Vec3A) -> Vec3A {
        Vec3A::splat(self).rem(rhs)
    }
}

impl Rem<&Vec3A> for f32 {
    type Output = Vec3A;
    #[inline]
    fn rem(self, rhs: &Vec3A) -> Vec3A {
        self.rem(*rhs)
    }
}

impl Rem<&Vec3A> for &f32 {
    type Output = Vec3A;
    #[inline]
    fn rem(self, rhs: &Vec3A) -> Vec3A {
        (*self).rem(*rhs)
    }
}

impl Rem<Vec3A> for &f32 {
    type Output = Vec3A;
    #[inline]
    fn rem(self, rhs: Vec3A) -> Vec3A {
        (*self).rem(rhs)
    }
}
1694
// Borrow the x/y/z lanes as a plain `[f32; 3]`.
// SAFETY: `Vec3A` is `repr(transparent)` over 16 bytes of `f32` data, so
// reinterpreting the first 12 bytes as `[f32; 3]` is in-bounds and aligned.
#[cfg(not(target_arch = "spirv"))]
impl AsRef<[f32; 3]> for Vec3A {
    #[inline]
    fn as_ref(&self) -> &[f32; 3] {
        unsafe { &*(self as *const Vec3A as *const [f32; 3]) }
    }
}

#[cfg(not(target_arch = "spirv"))]
impl AsMut<[f32; 3]> for Vec3A {
    #[inline]
    fn as_mut(&mut self) -> &mut [f32; 3] {
        unsafe { &mut *(self as *mut Vec3A as *mut [f32; 3]) }
    }
}
1710
// Element-wise sum of an iterator of vectors; empty iterators yield ZERO.
impl Sum for Vec3A {
    #[inline]
    fn sum<I>(iter: I) -> Self
    where
        I: Iterator<Item = Self>,
    {
        iter.fold(Self::ZERO, Self::add)
    }
}

impl<'a> Sum<&'a Self> for Vec3A {
    #[inline]
    fn sum<I>(iter: I) -> Self
    where
        I: Iterator<Item = &'a Self>,
    {
        iter.fold(Self::ZERO, |a, &b| Self::add(a, b))
    }
}

// Element-wise product of an iterator of vectors; empty iterators yield ONE.
impl Product for Vec3A {
    #[inline]
    fn product<I>(iter: I) -> Self
    where
        I: Iterator<Item = Self>,
    {
        iter.fold(Self::ONE, Self::mul)
    }
}

impl<'a> Product<&'a Self> for Vec3A {
    #[inline]
    fn product<I>(iter: I) -> Self
    where
        I: Iterator<Item = &'a Self>,
    {
        iter.fold(Self::ONE, |a, &b| Self::mul(a, b))
    }
}
1750
// Element-wise negation, implemented by XOR-ing the IEEE-754 sign bit of
// every lane (cheaper than a multiply by -1 and preserves NaN payloads).
impl Neg for Vec3A {
    type Output = Self;
    #[inline]
    fn neg(self) -> Self {
        Self(unsafe { _mm_xor_ps(_mm_set1_ps(-0.0), self.0) })
    }
}

impl Neg for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn neg(self) -> Vec3A {
        (*self).neg()
    }
}
1766
// Indexed element access: 0 => x, 1 => y, 2 => z.
// Panics on indices >= 3 (the padding lane is deliberately unreachable).
impl Index<usize> for Vec3A {
    type Output = f32;
    #[inline]
    fn index(&self, index: usize) -> &Self::Output {
        match index {
            0 => &self.x,
            1 => &self.y,
            2 => &self.z,
            _ => panic!("index out of bounds"),
        }
    }
}

impl IndexMut<usize> for Vec3A {
    #[inline]
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        match index {
            0 => &mut self.x,
            1 => &mut self.y,
            2 => &mut self.z,
            _ => panic!("index out of bounds"),
        }
    }
}
1791
// Formats as `[x, y, z]`, honouring a requested precision (e.g. `{:.2}`).
impl fmt::Display for Vec3A {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        if let Some(p) = f.precision() {
            write!(f, "[{:.*}, {:.*}, {:.*}]", p, self.x, p, self.y, p, self.z)
        } else {
            write!(f, "[{}, {}, {}]", self.x, self.y, self.z)
        }
    }
}

// Formats as `Vec3A(x, y, z)`; the padding lane is not shown.
impl fmt::Debug for Vec3A {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt.debug_tuple(stringify!(Vec3A))
            .field(&self.x)
            .field(&self.y)
            .field(&self.z)
            .finish()
    }
}
1811
// Zero-cost conversions to/from the raw SIMD register.
impl From<Vec3A> for __m128 {
    #[inline(always)]
    fn from(t: Vec3A) -> Self {
        t.0
    }
}

impl From<__m128> for Vec3A {
    #[inline(always)]
    fn from(t: __m128) -> Self {
        Self(t)
    }
}

impl From<[f32; 3]> for Vec3A {
    #[inline]
    fn from(a: [f32; 3]) -> Self {
        Self::new(a[0], a[1], a[2])
    }
}

impl From<Vec3A> for [f32; 3] {
    #[inline]
    fn from(v: Vec3A) -> Self {
        use crate::Align16;
        use core::mem::MaybeUninit;
        // Store the whole register with an aligned 16-byte store, then read
        // back only the first three floats. `Align16` guarantees the
        // alignment `_mm_store_ps` requires.
        let mut out: MaybeUninit<Align16<Self>> = MaybeUninit::uninit();
        unsafe {
            _mm_store_ps(out.as_mut_ptr().cast(), v.0);
            out.assume_init().0
        }
    }
}

impl From<(f32, f32, f32)> for Vec3A {
    #[inline]
    fn from(t: (f32, f32, f32)) -> Self {
        Self::new(t.0, t.1, t.2)
    }
}

impl From<Vec3A> for (f32, f32, f32) {
    #[inline]
    fn from(v: Vec3A) -> Self {
        use crate::Align16;
        use core::mem::MaybeUninit;
        // Same aligned-store trick as the `[f32; 3]` conversion above.
        let mut out: MaybeUninit<Align16<Self>> = MaybeUninit::uninit();
        unsafe {
            _mm_store_ps(out.as_mut_ptr().cast(), v.0);
            out.assume_init().0
        }
    }
}

impl From<Vec3> for Vec3A {
    #[inline]
    fn from(v: Vec3) -> Self {
        Self::new(v.x, v.y, v.z)
    }
}

impl From<Vec3A> for Vec3 {
    #[inline]
    fn from(v: Vec3A) -> Self {
        use crate::Align16;
        use core::mem::MaybeUninit;
        // Same aligned-store trick as the `[f32; 3]` conversion above.
        let mut out: MaybeUninit<Align16<Self>> = MaybeUninit::uninit();
        unsafe {
            _mm_store_ps(out.as_mut_ptr().cast(), v.0);
            out.assume_init().0
        }
    }
}

// Builds a 3D vector from a 2D vector plus a `z` component.
impl From<(Vec2, f32)> for Vec3A {
    #[inline]
    fn from((v, z): (Vec2, f32)) -> Self {
        Self::new(v.x, v.y, z)
    }
}
1892
// Provides `.x`/`.y`/`.z` field access by reinterpreting the register as a
// plain XYZ struct. SAFETY: the deref target is a `repr(C)` struct of three
// `f32`s, layout-compatible with the first 12 bytes of the 16-byte register.
impl Deref for Vec3A {
    type Target = crate::deref::Vec3<f32>;
    #[inline]
    fn deref(&self) -> &Self::Target {
        unsafe { &*(self as *const Self).cast() }
    }
}

impl DerefMut for Vec3A {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        unsafe { &mut *(self as *mut Self).cast() }
    }
}
1907
// Converts boolean vectors to float vectors: true => 1.0, false => 0.0.
impl From<BVec3> for Vec3A {
    #[inline]
    fn from(v: BVec3) -> Self {
        Self::new(f32::from(v.x), f32::from(v.y), f32::from(v.z))
    }
}

impl From<BVec3A> for Vec3A {
    #[inline]
    fn from(v: BVec3A) -> Self {
        // Round-trip through `[bool; 3]` because `BVec3A` stores lane masks,
        // not `bool`s.
        let bool_array: [bool; 3] = v.into();
        Self::new(
            f32::from(bool_array[0]),
            f32::from(bool_array[1]),
            f32::from(bool_array[2]),
        )
    }
}