1#[cfg(not(feature = "scalar-math"))]
4use crate::BVec4A;
5use crate::{BVec4, I16Vec4, I64Vec2, I64Vec3, I8Vec4, IVec4, U16Vec4, U64Vec4, U8Vec4, UVec4};
6
7use core::fmt;
8use core::iter::{Product, Sum};
9use core::{f32, ops::*};
10
/// Creates a 4-dimensional `i64` vector.
#[inline(always)]
#[must_use]
pub const fn i64vec4(x: i64, y: i64, z: i64, w: i64) -> I64Vec4 {
    I64Vec4::new(x, y, z, w)
}
17
/// A 4-dimensional vector of `i64` components.
#[cfg_attr(not(target_arch = "spirv"), derive(Hash))]
#[derive(Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "cuda", repr(align(16)))]
#[cfg_attr(not(target_arch = "spirv"), repr(C))]
#[cfg_attr(target_arch = "spirv", repr(simd))]
pub struct I64Vec4 {
    /// X component.
    pub x: i64,
    /// Y component.
    pub y: i64,
    /// Z component.
    pub z: i64,
    /// W component.
    pub w: i64,
}
30
impl I64Vec4 {
    /// All zeroes.
    pub const ZERO: Self = Self::splat(0);

    /// All ones.
    pub const ONE: Self = Self::splat(1);

    /// All negative ones.
    pub const NEG_ONE: Self = Self::splat(-1);

    /// All `i64::MIN`.
    pub const MIN: Self = Self::splat(i64::MIN);

    /// All `i64::MAX`.
    pub const MAX: Self = Self::splat(i64::MAX);

    /// A unit vector pointing along the positive X axis.
    pub const X: Self = Self::new(1, 0, 0, 0);

    /// A unit vector pointing along the positive Y axis.
    pub const Y: Self = Self::new(0, 1, 0, 0);

    /// A unit vector pointing along the positive Z axis.
    pub const Z: Self = Self::new(0, 0, 1, 0);

    /// A unit vector pointing along the positive W axis.
    pub const W: Self = Self::new(0, 0, 0, 1);

    /// A unit vector pointing along the negative X axis.
    pub const NEG_X: Self = Self::new(-1, 0, 0, 0);

    /// A unit vector pointing along the negative Y axis.
    pub const NEG_Y: Self = Self::new(0, -1, 0, 0);

    /// A unit vector pointing along the negative Z axis.
    pub const NEG_Z: Self = Self::new(0, 0, -1, 0);

    /// A unit vector pointing along the negative W axis.
    pub const NEG_W: Self = Self::new(0, 0, 0, -1);

    /// The unit axes.
    pub const AXES: [Self; 4] = [Self::X, Self::Y, Self::Z, Self::W];

    /// Creates a new vector.
    #[inline(always)]
    #[must_use]
    pub const fn new(x: i64, y: i64, z: i64, w: i64) -> Self {
        Self { x, y, z, w }
    }

    /// Creates a vector with all elements set to `v`.
    #[inline]
    #[must_use]
    pub const fn splat(v: i64) -> Self {
        Self {
            x: v,

            y: v,

            z: v,

            w: v,
        }
    }

    /// Returns a vector containing each element of `self` modified by a mapping function `f`.
    #[inline]
    #[must_use]
    pub fn map<F>(self, f: F) -> Self
    where
        F: Fn(i64) -> i64,
    {
        Self::new(f(self.x), f(self.y), f(self.z), f(self.w))
    }

    /// Creates a vector from the elements in `if_true` and `if_false`, selecting which
    /// to use for each element.
    ///
    /// A true element in the mask uses the corresponding element from `if_true`, and
    /// false uses the element from `if_false`.
    #[inline]
    #[must_use]
    pub fn select(mask: BVec4, if_true: Self, if_false: Self) -> Self {
        Self {
            x: if mask.test(0) { if_true.x } else { if_false.x },
            y: if mask.test(1) { if_true.y } else { if_false.y },
            z: if mask.test(2) { if_true.z } else { if_false.z },
            w: if mask.test(3) { if_true.w } else { if_false.w },
        }
    }

    /// Creates a new vector from an array.
    #[inline]
    #[must_use]
    pub const fn from_array(a: [i64; 4]) -> Self {
        Self::new(a[0], a[1], a[2], a[3])
    }

    /// Returns the elements of `self` as `[x, y, z, w]`.
    #[inline]
    #[must_use]
    pub const fn to_array(&self) -> [i64; 4] {
        [self.x, self.y, self.z, self.w]
    }

    /// Creates a vector from the first 4 values in `slice`.
    ///
    /// # Panics
    ///
    /// Panics if `slice` is less than 4 elements long.
    #[inline]
    #[must_use]
    pub const fn from_slice(slice: &[i64]) -> Self {
        assert!(slice.len() >= 4);
        Self::new(slice[0], slice[1], slice[2], slice[3])
    }

    /// Writes the elements of `self` to the first 4 elements in `slice`.
    ///
    /// # Panics
    ///
    /// Panics if `slice` is less than 4 elements long.
    #[inline]
    pub fn write_to_slice(self, slice: &mut [i64]) {
        slice[..4].copy_from_slice(&self.to_array());
    }

    /// Creates a 3D vector from the `x`, `y` and `z` elements of `self`, discarding `w`.
    #[inline]
    #[must_use]
    pub fn truncate(self) -> I64Vec3 {
        use crate::swizzles::Vec4Swizzles;
        self.xyz()
    }

    /// Creates a 4D vector from `self` with the given value of `x`.
    #[inline]
    #[must_use]
    pub fn with_x(mut self, x: i64) -> Self {
        self.x = x;
        self
    }

    /// Creates a 4D vector from `self` with the given value of `y`.
    #[inline]
    #[must_use]
    pub fn with_y(mut self, y: i64) -> Self {
        self.y = y;
        self
    }

    /// Creates a 4D vector from `self` with the given value of `z`.
    #[inline]
    #[must_use]
    pub fn with_z(mut self, z: i64) -> Self {
        self.z = z;
        self
    }

    /// Creates a 4D vector from `self` with the given value of `w`.
    #[inline]
    #[must_use]
    pub fn with_w(mut self, w: i64) -> Self {
        self.w = w;
        self
    }

    /// Computes the dot product of `self` and `rhs`.
    /// Overflow follows standard `i64` multiply/add semantics.
    #[inline]
    #[must_use]
    pub fn dot(self, rhs: Self) -> i64 {
        (self.x * rhs.x) + (self.y * rhs.y) + (self.z * rhs.z) + (self.w * rhs.w)
    }

    /// Returns a vector where every component is the dot product of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn dot_into_vec(self, rhs: Self) -> Self {
        Self::splat(self.dot(rhs))
    }

    /// Returns a vector containing the minimum values for each element of `self` and `rhs`.
    ///
    /// In other words this computes `[self.x.min(rhs.x), self.y.min(rhs.y), ..]`.
    #[inline]
    #[must_use]
    pub fn min(self, rhs: Self) -> Self {
        Self {
            x: self.x.min(rhs.x),
            y: self.y.min(rhs.y),
            z: self.z.min(rhs.z),
            w: self.w.min(rhs.w),
        }
    }

    /// Returns a vector containing the maximum values for each element of `self` and `rhs`.
    ///
    /// In other words this computes `[self.x.max(rhs.x), self.y.max(rhs.y), ..]`.
    #[inline]
    #[must_use]
    pub fn max(self, rhs: Self) -> Self {
        Self {
            x: self.x.max(rhs.x),
            y: self.y.max(rhs.y),
            z: self.z.max(rhs.z),
            w: self.w.max(rhs.w),
        }
    }

    /// Component-wise clamping of values, similar to [`i64::clamp`].
    ///
    /// Each element in `min` must be less-or-equal to the corresponding element in `max`.
    ///
    /// # Panics
    ///
    /// Will panic if `min` is greater than `max` when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn clamp(self, min: Self, max: Self) -> Self {
        glam_assert!(min.cmple(max).all(), "clamp: expected min <= max");
        self.max(min).min(max)
    }

    /// Returns the horizontal minimum of `self`.
    ///
    /// In other words this computes `min(x, y, z, w)`.
    #[inline]
    #[must_use]
    pub fn min_element(self) -> i64 {
        self.x.min(self.y.min(self.z.min(self.w)))
    }

    /// Returns the horizontal maximum of `self`.
    ///
    /// In other words this computes `max(x, y, z, w)`.
    #[inline]
    #[must_use]
    pub fn max_element(self) -> i64 {
        self.x.max(self.y.max(self.z.max(self.w)))
    }

    /// Returns the sum of all elements of `self`.
    ///
    /// In other words, this computes `self.x + self.y + self.z + self.w`.
    #[inline]
    #[must_use]
    pub fn element_sum(self) -> i64 {
        self.x + self.y + self.z + self.w
    }

    /// Returns the product of all elements of `self`.
    ///
    /// In other words, this computes `self.x * self.y * self.z * self.w`.
    #[inline]
    #[must_use]
    pub fn element_product(self) -> i64 {
        self.x * self.y * self.z * self.w
    }

    /// Returns a vector mask containing the result of a `==` comparison for each element of
    /// `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn cmpeq(self, rhs: Self) -> BVec4 {
        BVec4::new(
            self.x.eq(&rhs.x),
            self.y.eq(&rhs.y),
            self.z.eq(&rhs.z),
            self.w.eq(&rhs.w),
        )
    }

    /// Returns a vector mask containing the result of a `!=` comparison for each element of
    /// `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn cmpne(self, rhs: Self) -> BVec4 {
        BVec4::new(
            self.x.ne(&rhs.x),
            self.y.ne(&rhs.y),
            self.z.ne(&rhs.z),
            self.w.ne(&rhs.w),
        )
    }

    /// Returns a vector mask containing the result of a `>=` comparison for each element of
    /// `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn cmpge(self, rhs: Self) -> BVec4 {
        BVec4::new(
            self.x.ge(&rhs.x),
            self.y.ge(&rhs.y),
            self.z.ge(&rhs.z),
            self.w.ge(&rhs.w),
        )
    }

    /// Returns a vector mask containing the result of a `>` comparison for each element of
    /// `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn cmpgt(self, rhs: Self) -> BVec4 {
        BVec4::new(
            self.x.gt(&rhs.x),
            self.y.gt(&rhs.y),
            self.z.gt(&rhs.z),
            self.w.gt(&rhs.w),
        )
    }

    /// Returns a vector mask containing the result of a `<=` comparison for each element of
    /// `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn cmple(self, rhs: Self) -> BVec4 {
        BVec4::new(
            self.x.le(&rhs.x),
            self.y.le(&rhs.y),
            self.z.le(&rhs.z),
            self.w.le(&rhs.w),
        )
    }

    /// Returns a vector mask containing the result of a `<` comparison for each element of
    /// `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn cmplt(self, rhs: Self) -> BVec4 {
        BVec4::new(
            self.x.lt(&rhs.x),
            self.y.lt(&rhs.y),
            self.z.lt(&rhs.z),
            self.w.lt(&rhs.w),
        )
    }

    /// Returns a vector containing the absolute value of each element of `self`.
    #[inline]
    #[must_use]
    pub fn abs(self) -> Self {
        Self {
            x: self.x.abs(),
            y: self.y.abs(),
            z: self.z.abs(),
            w: self.w.abs(),
        }
    }

    /// Returns a vector with elements representing the sign of `self`.
    ///
    /// Each element is `0` if it is zero, `1` if it is positive, `-1` if it is negative.
    #[inline]
    #[must_use]
    pub fn signum(self) -> Self {
        Self {
            x: self.x.signum(),
            y: self.y.signum(),
            z: self.z.signum(),
            w: self.w.signum(),
        }
    }

    /// Returns a bitmask with the lowest 4 bits set to the sign bits from the elements of
    /// `self`: bit 0 is `x`, bit 1 is `y`, bit 2 is `z`, bit 3 is `w`.
    #[inline]
    #[must_use]
    pub fn is_negative_bitmask(self) -> u32 {
        (self.x.is_negative() as u32)
            | (self.y.is_negative() as u32) << 1
            | (self.z.is_negative() as u32) << 2
            | (self.w.is_negative() as u32) << 3
    }

    /// Computes the squared length of `self` (`self.dot(self)`).
    #[doc(alias = "magnitude2")]
    #[inline]
    #[must_use]
    pub fn length_squared(self) -> i64 {
        self.dot(self)
    }

    /// Computes the squared euclidean distance between two points in space.
    #[inline]
    #[must_use]
    pub fn distance_squared(self, rhs: Self) -> i64 {
        (self - rhs).length_squared()
    }

    /// Returns the element-wise quotient of Euclidean division of `self` by `rhs`.
    ///
    /// # Panics
    ///
    /// Panics if any element of `rhs` is zero (standard `i64::div_euclid` behavior).
    #[inline]
    #[must_use]
    pub fn div_euclid(self, rhs: Self) -> Self {
        Self::new(
            self.x.div_euclid(rhs.x),
            self.y.div_euclid(rhs.y),
            self.z.div_euclid(rhs.z),
            self.w.div_euclid(rhs.w),
        )
    }

    /// Returns the element-wise remainder of Euclidean division of `self` by `rhs`.
    /// Each resulting element is non-negative when the corresponding `rhs` element is non-zero.
    ///
    /// # Panics
    ///
    /// Panics if any element of `rhs` is zero (standard `i64::rem_euclid` behavior).
    #[inline]
    #[must_use]
    pub fn rem_euclid(self, rhs: Self) -> Self {
        Self::new(
            self.x.rem_euclid(rhs.x),
            self.y.rem_euclid(rhs.y),
            self.z.rem_euclid(rhs.z),
            self.w.rem_euclid(rhs.w),
        )
    }

    /// Casts all elements of `self` to `f32`.
    #[inline]
    #[must_use]
    pub fn as_vec4(&self) -> crate::Vec4 {
        crate::Vec4::new(self.x as f32, self.y as f32, self.z as f32, self.w as f32)
    }

    /// Casts all elements of `self` to `f64`.
    #[inline]
    #[must_use]
    pub fn as_dvec4(&self) -> crate::DVec4 {
        crate::DVec4::new(self.x as f64, self.y as f64, self.z as f64, self.w as f64)
    }

    /// Casts all elements of `self` to `i8`. Out-of-range values wrap (`as` cast).
    #[inline]
    #[must_use]
    pub fn as_i8vec4(&self) -> crate::I8Vec4 {
        crate::I8Vec4::new(self.x as i8, self.y as i8, self.z as i8, self.w as i8)
    }

    /// Casts all elements of `self` to `u8`. Out-of-range values wrap (`as` cast).
    #[inline]
    #[must_use]
    pub fn as_u8vec4(&self) -> crate::U8Vec4 {
        crate::U8Vec4::new(self.x as u8, self.y as u8, self.z as u8, self.w as u8)
    }

    /// Casts all elements of `self` to `i16`. Out-of-range values wrap (`as` cast).
    #[inline]
    #[must_use]
    pub fn as_i16vec4(&self) -> crate::I16Vec4 {
        crate::I16Vec4::new(self.x as i16, self.y as i16, self.z as i16, self.w as i16)
    }

    /// Casts all elements of `self` to `u16`. Out-of-range values wrap (`as` cast).
    #[inline]
    #[must_use]
    pub fn as_u16vec4(&self) -> crate::U16Vec4 {
        crate::U16Vec4::new(self.x as u16, self.y as u16, self.z as u16, self.w as u16)
    }

    /// Casts all elements of `self` to `i32`. Out-of-range values wrap (`as` cast).
    #[inline]
    #[must_use]
    pub fn as_ivec4(&self) -> crate::IVec4 {
        crate::IVec4::new(self.x as i32, self.y as i32, self.z as i32, self.w as i32)
    }

    /// Casts all elements of `self` to `u32`. Out-of-range values wrap (`as` cast).
    #[inline]
    #[must_use]
    pub fn as_uvec4(&self) -> crate::UVec4 {
        crate::UVec4::new(self.x as u32, self.y as u32, self.z as u32, self.w as u32)
    }

    /// Casts all elements of `self` to `u64`. Negative values wrap (`as` cast).
    #[inline]
    #[must_use]
    pub fn as_u64vec4(&self) -> crate::U64Vec4 {
        crate::U64Vec4::new(self.x as u64, self.y as u64, self.z as u64, self.w as u64)
    }

    /// Returns a vector containing the wrapping addition of `self` and `rhs`.
    ///
    /// In other words this computes `[self.x.wrapping_add(rhs.x), ..]`.
    #[inline]
    #[must_use]
    pub const fn wrapping_add(self, rhs: Self) -> Self {
        Self {
            x: self.x.wrapping_add(rhs.x),
            y: self.y.wrapping_add(rhs.y),
            z: self.z.wrapping_add(rhs.z),
            w: self.w.wrapping_add(rhs.w),
        }
    }

    /// Returns a vector containing the wrapping subtraction of `self` and `rhs`.
    ///
    /// In other words this computes `[self.x.wrapping_sub(rhs.x), ..]`.
    #[inline]
    #[must_use]
    pub const fn wrapping_sub(self, rhs: Self) -> Self {
        Self {
            x: self.x.wrapping_sub(rhs.x),
            y: self.y.wrapping_sub(rhs.y),
            z: self.z.wrapping_sub(rhs.z),
            w: self.w.wrapping_sub(rhs.w),
        }
    }

    /// Returns a vector containing the wrapping multiplication of `self` and `rhs`.
    ///
    /// In other words this computes `[self.x.wrapping_mul(rhs.x), ..]`.
    #[inline]
    #[must_use]
    pub const fn wrapping_mul(self, rhs: Self) -> Self {
        Self {
            x: self.x.wrapping_mul(rhs.x),
            y: self.y.wrapping_mul(rhs.y),
            z: self.z.wrapping_mul(rhs.z),
            w: self.w.wrapping_mul(rhs.w),
        }
    }

    /// Returns a vector containing the wrapping division of `self` and `rhs`.
    ///
    /// In other words this computes `[self.x.wrapping_div(rhs.x), ..]`.
    #[inline]
    #[must_use]
    pub const fn wrapping_div(self, rhs: Self) -> Self {
        Self {
            x: self.x.wrapping_div(rhs.x),
            y: self.y.wrapping_div(rhs.y),
            z: self.z.wrapping_div(rhs.z),
            w: self.w.wrapping_div(rhs.w),
        }
    }

    /// Returns a vector containing the saturating addition of `self` and `rhs`.
    ///
    /// In other words this computes `[self.x.saturating_add(rhs.x), ..]`.
    #[inline]
    #[must_use]
    pub const fn saturating_add(self, rhs: Self) -> Self {
        Self {
            x: self.x.saturating_add(rhs.x),
            y: self.y.saturating_add(rhs.y),
            z: self.z.saturating_add(rhs.z),
            w: self.w.saturating_add(rhs.w),
        }
    }

    /// Returns a vector containing the saturating subtraction of `self` and `rhs`.
    ///
    /// In other words this computes `[self.x.saturating_sub(rhs.x), ..]`.
    #[inline]
    #[must_use]
    pub const fn saturating_sub(self, rhs: Self) -> Self {
        Self {
            x: self.x.saturating_sub(rhs.x),
            y: self.y.saturating_sub(rhs.y),
            z: self.z.saturating_sub(rhs.z),
            w: self.w.saturating_sub(rhs.w),
        }
    }

    /// Returns a vector containing the saturating multiplication of `self` and `rhs`.
    ///
    /// In other words this computes `[self.x.saturating_mul(rhs.x), ..]`.
    #[inline]
    #[must_use]
    pub const fn saturating_mul(self, rhs: Self) -> Self {
        Self {
            x: self.x.saturating_mul(rhs.x),
            y: self.y.saturating_mul(rhs.y),
            z: self.z.saturating_mul(rhs.z),
            w: self.w.saturating_mul(rhs.w),
        }
    }

    /// Returns a vector containing the saturating division of `self` and `rhs`.
    ///
    /// In other words this computes `[self.x.saturating_div(rhs.x), ..]`.
    #[inline]
    #[must_use]
    pub const fn saturating_div(self, rhs: Self) -> Self {
        Self {
            x: self.x.saturating_div(rhs.x),
            y: self.y.saturating_div(rhs.y),
            z: self.z.saturating_div(rhs.z),
            w: self.w.saturating_div(rhs.w),
        }
    }

    /// Returns a vector containing the wrapping addition of `self` and unsigned vector `rhs`.
    ///
    /// In other words this computes `[self.x.wrapping_add_unsigned(rhs.x), ..]`.
    #[inline]
    #[must_use]
    pub const fn wrapping_add_unsigned(self, rhs: U64Vec4) -> Self {
        Self {
            x: self.x.wrapping_add_unsigned(rhs.x),
            y: self.y.wrapping_add_unsigned(rhs.y),
            z: self.z.wrapping_add_unsigned(rhs.z),
            w: self.w.wrapping_add_unsigned(rhs.w),
        }
    }

    /// Returns a vector containing the wrapping subtraction of `self` and unsigned vector `rhs`.
    ///
    /// In other words this computes `[self.x.wrapping_sub_unsigned(rhs.x), ..]`.
    #[inline]
    #[must_use]
    pub const fn wrapping_sub_unsigned(self, rhs: U64Vec4) -> Self {
        Self {
            x: self.x.wrapping_sub_unsigned(rhs.x),
            y: self.y.wrapping_sub_unsigned(rhs.y),
            z: self.z.wrapping_sub_unsigned(rhs.z),
            w: self.w.wrapping_sub_unsigned(rhs.w),
        }
    }

    /// Returns a vector containing the saturating addition of `self` and unsigned vector `rhs`.
    ///
    /// In other words this computes `[self.x.saturating_add_unsigned(rhs.x), ..]`.
    #[inline]
    #[must_use]
    pub const fn saturating_add_unsigned(self, rhs: U64Vec4) -> Self {
        Self {
            x: self.x.saturating_add_unsigned(rhs.x),
            y: self.y.saturating_add_unsigned(rhs.y),
            z: self.z.saturating_add_unsigned(rhs.z),
            w: self.w.saturating_add_unsigned(rhs.w),
        }
    }

    /// Returns a vector containing the saturating subtraction of `self` and unsigned vector `rhs`.
    ///
    /// In other words this computes `[self.x.saturating_sub_unsigned(rhs.x), ..]`.
    #[inline]
    #[must_use]
    pub const fn saturating_sub_unsigned(self, rhs: U64Vec4) -> Self {
        Self {
            x: self.x.saturating_sub_unsigned(rhs.x),
            y: self.y.saturating_sub_unsigned(rhs.y),
            z: self.z.saturating_sub_unsigned(rhs.z),
            w: self.w.saturating_sub_unsigned(rhs.w),
        }
    }
}
707
708impl Default for I64Vec4 {
709 #[inline(always)]
710 fn default() -> Self {
711 Self::ZERO
712 }
713}
714
// Component-wise division: `vector / vector`, `vector / scalar` and `scalar / vector`.
// Follows standard `i64` division semantics: panics on division by zero and on
// `i64::MIN / -1` overflow.
impl Div<I64Vec4> for I64Vec4 {
    type Output = Self;
    #[inline]
    fn div(self, rhs: Self) -> Self {
        Self {
            x: self.x.div(rhs.x),
            y: self.y.div(rhs.y),
            z: self.z.div(rhs.z),
            w: self.w.div(rhs.w),
        }
    }
}

// The `&` operand variants below simply dereference and forward to the
// by-value implementations above/below.
impl Div<&I64Vec4> for I64Vec4 {
    type Output = I64Vec4;
    #[inline]
    fn div(self, rhs: &I64Vec4) -> I64Vec4 {
        self.div(*rhs)
    }
}

impl Div<&I64Vec4> for &I64Vec4 {
    type Output = I64Vec4;
    #[inline]
    fn div(self, rhs: &I64Vec4) -> I64Vec4 {
        (*self).div(*rhs)
    }
}

impl Div<I64Vec4> for &I64Vec4 {
    type Output = I64Vec4;
    #[inline]
    fn div(self, rhs: I64Vec4) -> I64Vec4 {
        (*self).div(rhs)
    }
}

// In-place component-wise division.
impl DivAssign<I64Vec4> for I64Vec4 {
    #[inline]
    fn div_assign(&mut self, rhs: Self) {
        self.x.div_assign(rhs.x);
        self.y.div_assign(rhs.y);
        self.z.div_assign(rhs.z);
        self.w.div_assign(rhs.w);
    }
}

impl DivAssign<&Self> for I64Vec4 {
    #[inline]
    fn div_assign(&mut self, rhs: &Self) {
        self.div_assign(*rhs)
    }
}

// Vector divided by scalar: the scalar is applied to every component.
impl Div<i64> for I64Vec4 {
    type Output = Self;
    #[inline]
    fn div(self, rhs: i64) -> Self {
        Self {
            x: self.x.div(rhs),
            y: self.y.div(rhs),
            z: self.z.div(rhs),
            w: self.w.div(rhs),
        }
    }
}

impl Div<&i64> for I64Vec4 {
    type Output = I64Vec4;
    #[inline]
    fn div(self, rhs: &i64) -> I64Vec4 {
        self.div(*rhs)
    }
}

impl Div<&i64> for &I64Vec4 {
    type Output = I64Vec4;
    #[inline]
    fn div(self, rhs: &i64) -> I64Vec4 {
        (*self).div(*rhs)
    }
}

impl Div<i64> for &I64Vec4 {
    type Output = I64Vec4;
    #[inline]
    fn div(self, rhs: i64) -> I64Vec4 {
        (*self).div(rhs)
    }
}

impl DivAssign<i64> for I64Vec4 {
    #[inline]
    fn div_assign(&mut self, rhs: i64) {
        self.x.div_assign(rhs);
        self.y.div_assign(rhs);
        self.z.div_assign(rhs);
        self.w.div_assign(rhs);
    }
}

impl DivAssign<&i64> for I64Vec4 {
    #[inline]
    fn div_assign(&mut self, rhs: &i64) {
        self.div_assign(*rhs)
    }
}

// Scalar divided by vector: the scalar is the dividend for every component.
impl Div<I64Vec4> for i64 {
    type Output = I64Vec4;
    #[inline]
    fn div(self, rhs: I64Vec4) -> I64Vec4 {
        I64Vec4 {
            x: self.div(rhs.x),
            y: self.div(rhs.y),
            z: self.div(rhs.z),
            w: self.div(rhs.w),
        }
    }
}

impl Div<&I64Vec4> for i64 {
    type Output = I64Vec4;
    #[inline]
    fn div(self, rhs: &I64Vec4) -> I64Vec4 {
        self.div(*rhs)
    }
}

impl Div<&I64Vec4> for &i64 {
    type Output = I64Vec4;
    #[inline]
    fn div(self, rhs: &I64Vec4) -> I64Vec4 {
        (*self).div(*rhs)
    }
}

impl Div<I64Vec4> for &i64 {
    type Output = I64Vec4;
    #[inline]
    fn div(self, rhs: I64Vec4) -> I64Vec4 {
        (*self).div(rhs)
    }
}
859
// Component-wise multiplication: `vector * vector`, `vector * scalar` and
// `scalar * vector`. Overflow follows standard `i64` multiplication semantics.
impl Mul<I64Vec4> for I64Vec4 {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: Self) -> Self {
        Self {
            x: self.x.mul(rhs.x),
            y: self.y.mul(rhs.y),
            z: self.z.mul(rhs.z),
            w: self.w.mul(rhs.w),
        }
    }
}

// The `&` operand variants below simply dereference and forward to the
// by-value implementations.
impl Mul<&I64Vec4> for I64Vec4 {
    type Output = I64Vec4;
    #[inline]
    fn mul(self, rhs: &I64Vec4) -> I64Vec4 {
        self.mul(*rhs)
    }
}

impl Mul<&I64Vec4> for &I64Vec4 {
    type Output = I64Vec4;
    #[inline]
    fn mul(self, rhs: &I64Vec4) -> I64Vec4 {
        (*self).mul(*rhs)
    }
}

impl Mul<I64Vec4> for &I64Vec4 {
    type Output = I64Vec4;
    #[inline]
    fn mul(self, rhs: I64Vec4) -> I64Vec4 {
        (*self).mul(rhs)
    }
}

// In-place component-wise multiplication.
impl MulAssign<I64Vec4> for I64Vec4 {
    #[inline]
    fn mul_assign(&mut self, rhs: Self) {
        self.x.mul_assign(rhs.x);
        self.y.mul_assign(rhs.y);
        self.z.mul_assign(rhs.z);
        self.w.mul_assign(rhs.w);
    }
}

impl MulAssign<&Self> for I64Vec4 {
    #[inline]
    fn mul_assign(&mut self, rhs: &Self) {
        self.mul_assign(*rhs)
    }
}

// Vector scaled by a scalar.
impl Mul<i64> for I64Vec4 {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: i64) -> Self {
        Self {
            x: self.x.mul(rhs),
            y: self.y.mul(rhs),
            z: self.z.mul(rhs),
            w: self.w.mul(rhs),
        }
    }
}

impl Mul<&i64> for I64Vec4 {
    type Output = I64Vec4;
    #[inline]
    fn mul(self, rhs: &i64) -> I64Vec4 {
        self.mul(*rhs)
    }
}

impl Mul<&i64> for &I64Vec4 {
    type Output = I64Vec4;
    #[inline]
    fn mul(self, rhs: &i64) -> I64Vec4 {
        (*self).mul(*rhs)
    }
}

impl Mul<i64> for &I64Vec4 {
    type Output = I64Vec4;
    #[inline]
    fn mul(self, rhs: i64) -> I64Vec4 {
        (*self).mul(rhs)
    }
}

impl MulAssign<i64> for I64Vec4 {
    #[inline]
    fn mul_assign(&mut self, rhs: i64) {
        self.x.mul_assign(rhs);
        self.y.mul_assign(rhs);
        self.z.mul_assign(rhs);
        self.w.mul_assign(rhs);
    }
}

impl MulAssign<&i64> for I64Vec4 {
    #[inline]
    fn mul_assign(&mut self, rhs: &i64) {
        self.mul_assign(*rhs)
    }
}

// Scalar times vector (commutative counterpart of `Mul<i64> for I64Vec4`).
impl Mul<I64Vec4> for i64 {
    type Output = I64Vec4;
    #[inline]
    fn mul(self, rhs: I64Vec4) -> I64Vec4 {
        I64Vec4 {
            x: self.mul(rhs.x),
            y: self.mul(rhs.y),
            z: self.mul(rhs.z),
            w: self.mul(rhs.w),
        }
    }
}

impl Mul<&I64Vec4> for i64 {
    type Output = I64Vec4;
    #[inline]
    fn mul(self, rhs: &I64Vec4) -> I64Vec4 {
        self.mul(*rhs)
    }
}

impl Mul<&I64Vec4> for &i64 {
    type Output = I64Vec4;
    #[inline]
    fn mul(self, rhs: &I64Vec4) -> I64Vec4 {
        (*self).mul(*rhs)
    }
}

impl Mul<I64Vec4> for &i64 {
    type Output = I64Vec4;
    #[inline]
    fn mul(self, rhs: I64Vec4) -> I64Vec4 {
        (*self).mul(rhs)
    }
}
1004
// Component-wise addition: `vector + vector`, `vector + scalar` and
// `scalar + vector`. Overflow follows standard `i64` addition semantics.
impl Add<I64Vec4> for I64Vec4 {
    type Output = Self;
    #[inline]
    fn add(self, rhs: Self) -> Self {
        Self {
            x: self.x.add(rhs.x),
            y: self.y.add(rhs.y),
            z: self.z.add(rhs.z),
            w: self.w.add(rhs.w),
        }
    }
}

// The `&` operand variants below simply dereference and forward to the
// by-value implementations.
impl Add<&I64Vec4> for I64Vec4 {
    type Output = I64Vec4;
    #[inline]
    fn add(self, rhs: &I64Vec4) -> I64Vec4 {
        self.add(*rhs)
    }
}

impl Add<&I64Vec4> for &I64Vec4 {
    type Output = I64Vec4;
    #[inline]
    fn add(self, rhs: &I64Vec4) -> I64Vec4 {
        (*self).add(*rhs)
    }
}

impl Add<I64Vec4> for &I64Vec4 {
    type Output = I64Vec4;
    #[inline]
    fn add(self, rhs: I64Vec4) -> I64Vec4 {
        (*self).add(rhs)
    }
}

// In-place component-wise addition.
impl AddAssign<I64Vec4> for I64Vec4 {
    #[inline]
    fn add_assign(&mut self, rhs: Self) {
        self.x.add_assign(rhs.x);
        self.y.add_assign(rhs.y);
        self.z.add_assign(rhs.z);
        self.w.add_assign(rhs.w);
    }
}

impl AddAssign<&Self> for I64Vec4 {
    #[inline]
    fn add_assign(&mut self, rhs: &Self) {
        self.add_assign(*rhs)
    }
}

// Scalar added to every component.
impl Add<i64> for I64Vec4 {
    type Output = Self;
    #[inline]
    fn add(self, rhs: i64) -> Self {
        Self {
            x: self.x.add(rhs),
            y: self.y.add(rhs),
            z: self.z.add(rhs),
            w: self.w.add(rhs),
        }
    }
}

impl Add<&i64> for I64Vec4 {
    type Output = I64Vec4;
    #[inline]
    fn add(self, rhs: &i64) -> I64Vec4 {
        self.add(*rhs)
    }
}

impl Add<&i64> for &I64Vec4 {
    type Output = I64Vec4;
    #[inline]
    fn add(self, rhs: &i64) -> I64Vec4 {
        (*self).add(*rhs)
    }
}

impl Add<i64> for &I64Vec4 {
    type Output = I64Vec4;
    #[inline]
    fn add(self, rhs: i64) -> I64Vec4 {
        (*self).add(rhs)
    }
}

impl AddAssign<i64> for I64Vec4 {
    #[inline]
    fn add_assign(&mut self, rhs: i64) {
        self.x.add_assign(rhs);
        self.y.add_assign(rhs);
        self.z.add_assign(rhs);
        self.w.add_assign(rhs);
    }
}

impl AddAssign<&i64> for I64Vec4 {
    #[inline]
    fn add_assign(&mut self, rhs: &i64) {
        self.add_assign(*rhs)
    }
}

// Scalar plus vector (commutative counterpart of `Add<i64> for I64Vec4`).
impl Add<I64Vec4> for i64 {
    type Output = I64Vec4;
    #[inline]
    fn add(self, rhs: I64Vec4) -> I64Vec4 {
        I64Vec4 {
            x: self.add(rhs.x),
            y: self.add(rhs.y),
            z: self.add(rhs.z),
            w: self.add(rhs.w),
        }
    }
}

impl Add<&I64Vec4> for i64 {
    type Output = I64Vec4;
    #[inline]
    fn add(self, rhs: &I64Vec4) -> I64Vec4 {
        self.add(*rhs)
    }
}

impl Add<&I64Vec4> for &i64 {
    type Output = I64Vec4;
    #[inline]
    fn add(self, rhs: &I64Vec4) -> I64Vec4 {
        (*self).add(*rhs)
    }
}

impl Add<I64Vec4> for &i64 {
    type Output = I64Vec4;
    #[inline]
    fn add(self, rhs: I64Vec4) -> I64Vec4 {
        (*self).add(rhs)
    }
}
1149
// Component-wise subtraction: `vector - vector`, `vector - scalar` and
// `scalar - vector`. Overflow follows standard `i64` subtraction semantics.
impl Sub<I64Vec4> for I64Vec4 {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: Self) -> Self {
        Self {
            x: self.x.sub(rhs.x),
            y: self.y.sub(rhs.y),
            z: self.z.sub(rhs.z),
            w: self.w.sub(rhs.w),
        }
    }
}

// The `&` operand variants below simply dereference and forward to the
// by-value implementations.
impl Sub<&I64Vec4> for I64Vec4 {
    type Output = I64Vec4;
    #[inline]
    fn sub(self, rhs: &I64Vec4) -> I64Vec4 {
        self.sub(*rhs)
    }
}

impl Sub<&I64Vec4> for &I64Vec4 {
    type Output = I64Vec4;
    #[inline]
    fn sub(self, rhs: &I64Vec4) -> I64Vec4 {
        (*self).sub(*rhs)
    }
}

impl Sub<I64Vec4> for &I64Vec4 {
    type Output = I64Vec4;
    #[inline]
    fn sub(self, rhs: I64Vec4) -> I64Vec4 {
        (*self).sub(rhs)
    }
}

// In-place component-wise subtraction.
impl SubAssign<I64Vec4> for I64Vec4 {
    #[inline]
    fn sub_assign(&mut self, rhs: I64Vec4) {
        self.x.sub_assign(rhs.x);
        self.y.sub_assign(rhs.y);
        self.z.sub_assign(rhs.z);
        self.w.sub_assign(rhs.w);
    }
}

impl SubAssign<&Self> for I64Vec4 {
    #[inline]
    fn sub_assign(&mut self, rhs: &Self) {
        self.sub_assign(*rhs)
    }
}

// Scalar subtracted from every component.
impl Sub<i64> for I64Vec4 {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: i64) -> Self {
        Self {
            x: self.x.sub(rhs),
            y: self.y.sub(rhs),
            z: self.z.sub(rhs),
            w: self.w.sub(rhs),
        }
    }
}

impl Sub<&i64> for I64Vec4 {
    type Output = I64Vec4;
    #[inline]
    fn sub(self, rhs: &i64) -> I64Vec4 {
        self.sub(*rhs)
    }
}

impl Sub<&i64> for &I64Vec4 {
    type Output = I64Vec4;
    #[inline]
    fn sub(self, rhs: &i64) -> I64Vec4 {
        (*self).sub(*rhs)
    }
}

impl Sub<i64> for &I64Vec4 {
    type Output = I64Vec4;
    #[inline]
    fn sub(self, rhs: i64) -> I64Vec4 {
        (*self).sub(rhs)
    }
}

impl SubAssign<i64> for I64Vec4 {
    #[inline]
    fn sub_assign(&mut self, rhs: i64) {
        self.x.sub_assign(rhs);
        self.y.sub_assign(rhs);
        self.z.sub_assign(rhs);
        self.w.sub_assign(rhs);
    }
}

impl SubAssign<&i64> for I64Vec4 {
    #[inline]
    fn sub_assign(&mut self, rhs: &i64) {
        self.sub_assign(*rhs)
    }
}

// Scalar minus vector: the scalar is the minuend for every component.
impl Sub<I64Vec4> for i64 {
    type Output = I64Vec4;
    #[inline]
    fn sub(self, rhs: I64Vec4) -> I64Vec4 {
        I64Vec4 {
            x: self.sub(rhs.x),
            y: self.sub(rhs.y),
            z: self.sub(rhs.z),
            w: self.sub(rhs.w),
        }
    }
}

impl Sub<&I64Vec4> for i64 {
    type Output = I64Vec4;
    #[inline]
    fn sub(self, rhs: &I64Vec4) -> I64Vec4 {
        self.sub(*rhs)
    }
}

impl Sub<&I64Vec4> for &i64 {
    type Output = I64Vec4;
    #[inline]
    fn sub(self, rhs: &I64Vec4) -> I64Vec4 {
        (*self).sub(*rhs)
    }
}

impl Sub<I64Vec4> for &i64 {
    type Output = I64Vec4;
    #[inline]
    fn sub(self, rhs: I64Vec4) -> I64Vec4 {
        (*self).sub(rhs)
    }
}
1294
// Component-wise remainder: `vector % vector`, `vector % scalar` and
// `scalar % vector`. Uses standard `i64` truncated remainder (result has the
// sign of the dividend); panics on a zero divisor. For a non-negative
// remainder see `I64Vec4::rem_euclid`.
impl Rem<I64Vec4> for I64Vec4 {
    type Output = Self;
    #[inline]
    fn rem(self, rhs: Self) -> Self {
        Self {
            x: self.x.rem(rhs.x),
            y: self.y.rem(rhs.y),
            z: self.z.rem(rhs.z),
            w: self.w.rem(rhs.w),
        }
    }
}

// The `&` operand variants below simply dereference and forward to the
// by-value implementations.
impl Rem<&I64Vec4> for I64Vec4 {
    type Output = I64Vec4;
    #[inline]
    fn rem(self, rhs: &I64Vec4) -> I64Vec4 {
        self.rem(*rhs)
    }
}

impl Rem<&I64Vec4> for &I64Vec4 {
    type Output = I64Vec4;
    #[inline]
    fn rem(self, rhs: &I64Vec4) -> I64Vec4 {
        (*self).rem(*rhs)
    }
}

impl Rem<I64Vec4> for &I64Vec4 {
    type Output = I64Vec4;
    #[inline]
    fn rem(self, rhs: I64Vec4) -> I64Vec4 {
        (*self).rem(rhs)
    }
}

// In-place component-wise remainder.
impl RemAssign<I64Vec4> for I64Vec4 {
    #[inline]
    fn rem_assign(&mut self, rhs: Self) {
        self.x.rem_assign(rhs.x);
        self.y.rem_assign(rhs.y);
        self.z.rem_assign(rhs.z);
        self.w.rem_assign(rhs.w);
    }
}

impl RemAssign<&Self> for I64Vec4 {
    #[inline]
    fn rem_assign(&mut self, rhs: &Self) {
        self.rem_assign(*rhs)
    }
}

// Every component reduced modulo the scalar.
impl Rem<i64> for I64Vec4 {
    type Output = Self;
    #[inline]
    fn rem(self, rhs: i64) -> Self {
        Self {
            x: self.x.rem(rhs),
            y: self.y.rem(rhs),
            z: self.z.rem(rhs),
            w: self.w.rem(rhs),
        }
    }
}

impl Rem<&i64> for I64Vec4 {
    type Output = I64Vec4;
    #[inline]
    fn rem(self, rhs: &i64) -> I64Vec4 {
        self.rem(*rhs)
    }
}

impl Rem<&i64> for &I64Vec4 {
    type Output = I64Vec4;
    #[inline]
    fn rem(self, rhs: &i64) -> I64Vec4 {
        (*self).rem(*rhs)
    }
}

impl Rem<i64> for &I64Vec4 {
    type Output = I64Vec4;
    #[inline]
    fn rem(self, rhs: i64) -> I64Vec4 {
        (*self).rem(rhs)
    }
}

impl RemAssign<i64> for I64Vec4 {
    #[inline]
    fn rem_assign(&mut self, rhs: i64) {
        self.x.rem_assign(rhs);
        self.y.rem_assign(rhs);
        self.z.rem_assign(rhs);
        self.w.rem_assign(rhs);
    }
}

impl RemAssign<&i64> for I64Vec4 {
    #[inline]
    fn rem_assign(&mut self, rhs: &i64) {
        self.rem_assign(*rhs)
    }
}

// Scalar modulo vector: the scalar is the dividend for every component.
impl Rem<I64Vec4> for i64 {
    type Output = I64Vec4;
    #[inline]
    fn rem(self, rhs: I64Vec4) -> I64Vec4 {
        I64Vec4 {
            x: self.rem(rhs.x),
            y: self.rem(rhs.y),
            z: self.rem(rhs.z),
            w: self.rem(rhs.w),
        }
    }
}

impl Rem<&I64Vec4> for i64 {
    type Output = I64Vec4;
    #[inline]
    fn rem(self, rhs: &I64Vec4) -> I64Vec4 {
        self.rem(*rhs)
    }
}

impl Rem<&I64Vec4> for &i64 {
    type Output = I64Vec4;
    #[inline]
    fn rem(self, rhs: &I64Vec4) -> I64Vec4 {
        (*self).rem(*rhs)
    }
}

impl Rem<I64Vec4> for &i64 {
    type Output = I64Vec4;
    #[inline]
    fn rem(self, rhs: I64Vec4) -> I64Vec4 {
        (*self).rem(rhs)
    }
}
1439
// Zero-cost views of the vector as a plain `[i64; 4]` array.
#[cfg(not(target_arch = "spirv"))]
impl AsRef<[i64; 4]> for I64Vec4 {
    #[inline]
    fn as_ref(&self) -> &[i64; 4] {
        // SAFETY: on non-SPIR-V targets I64Vec4 is `repr(C)` with exactly four
        // consecutive `i64` fields, so it has the same size and layout as [i64; 4].
        unsafe { &*(self as *const I64Vec4 as *const [i64; 4]) }
    }
}

#[cfg(not(target_arch = "spirv"))]
impl AsMut<[i64; 4]> for I64Vec4 {
    #[inline]
    fn as_mut(&mut self) -> &mut [i64; 4] {
        // SAFETY: same layout argument as `as_ref` above; the mutable borrow of
        // `self` guarantees exclusive access for the returned reference.
        unsafe { &mut *(self as *mut I64Vec4 as *mut [i64; 4]) }
    }
}
1455
// Iterator reductions: summing starts from ZERO, product starts from ONE,
// both folding with the component-wise operators defined above.
impl Sum for I64Vec4 {
    #[inline]
    fn sum<I>(iter: I) -> Self
    where
        I: Iterator<Item = Self>,
    {
        iter.fold(Self::ZERO, Self::add)
    }
}

impl<'a> Sum<&'a Self> for I64Vec4 {
    #[inline]
    fn sum<I>(iter: I) -> Self
    where
        I: Iterator<Item = &'a Self>,
    {
        iter.fold(Self::ZERO, |a, &b| Self::add(a, b))
    }
}

impl Product for I64Vec4 {
    #[inline]
    fn product<I>(iter: I) -> Self
    where
        I: Iterator<Item = Self>,
    {
        iter.fold(Self::ONE, Self::mul)
    }
}

impl<'a> Product<&'a Self> for I64Vec4 {
    #[inline]
    fn product<I>(iter: I) -> Self
    where
        I: Iterator<Item = &'a Self>,
    {
        iter.fold(Self::ONE, |a, &b| Self::mul(a, b))
    }
}
1495
1496impl Neg for I64Vec4 {
1497 type Output = Self;
1498 #[inline]
1499 fn neg(self) -> Self {
1500 Self {
1501 x: self.x.neg(),
1502 y: self.y.neg(),
1503 z: self.z.neg(),
1504 w: self.w.neg(),
1505 }
1506 }
1507}
1508
1509impl Neg for &I64Vec4 {
1510 type Output = I64Vec4;
1511 #[inline]
1512 fn neg(self) -> I64Vec4 {
1513 (*self).neg()
1514 }
1515}
1516
1517impl Not for I64Vec4 {
1518 type Output = Self;
1519 #[inline]
1520 fn not(self) -> Self::Output {
1521 Self {
1522 x: self.x.not(),
1523 y: self.y.not(),
1524 z: self.z.not(),
1525 w: self.w.not(),
1526 }
1527 }
1528}
1529
// Component-wise bitwise AND / OR / XOR against another vector.
impl BitAnd for I64Vec4 {
    type Output = Self;
    #[inline]
    fn bitand(self, rhs: Self) -> Self::Output {
        Self {
            x: self.x.bitand(rhs.x),
            y: self.y.bitand(rhs.y),
            z: self.z.bitand(rhs.z),
            w: self.w.bitand(rhs.w),
        }
    }
}

impl BitOr for I64Vec4 {
    type Output = Self;
    #[inline]
    fn bitor(self, rhs: Self) -> Self::Output {
        Self {
            x: self.x.bitor(rhs.x),
            y: self.y.bitor(rhs.y),
            z: self.z.bitor(rhs.z),
            w: self.w.bitor(rhs.w),
        }
    }
}

impl BitXor for I64Vec4 {
    type Output = Self;
    #[inline]
    fn bitxor(self, rhs: Self) -> Self::Output {
        Self {
            x: self.x.bitxor(rhs.x),
            y: self.y.bitxor(rhs.y),
            z: self.z.bitxor(rhs.z),
            w: self.w.bitxor(rhs.w),
        }
    }
}
1568
// Component-wise bitwise AND / OR / XOR against a single scalar, applied to
// every component.
impl BitAnd<i64> for I64Vec4 {
    type Output = Self;
    #[inline]
    fn bitand(self, rhs: i64) -> Self::Output {
        Self {
            x: self.x.bitand(rhs),
            y: self.y.bitand(rhs),
            z: self.z.bitand(rhs),
            w: self.w.bitand(rhs),
        }
    }
}

impl BitOr<i64> for I64Vec4 {
    type Output = Self;
    #[inline]
    fn bitor(self, rhs: i64) -> Self::Output {
        Self {
            x: self.x.bitor(rhs),
            y: self.y.bitor(rhs),
            z: self.z.bitor(rhs),
            w: self.w.bitor(rhs),
        }
    }
}

impl BitXor<i64> for I64Vec4 {
    type Output = Self;
    #[inline]
    fn bitxor(self, rhs: i64) -> Self::Output {
        Self {
            x: self.x.bitxor(rhs),
            y: self.y.bitxor(rhs),
            z: self.z.bitxor(rhs),
            w: self.w.bitxor(rhs),
        }
    }
}
1607
// Component-wise left/right shifts by a signed scalar shift amount
// (i8/i16/i32/i64). Follows standard primitive shift semantics: `>>` on i64
// is an arithmetic (sign-extending) shift, and shift amounts >= 64 overflow
// (panic in debug builds).
impl Shl<i8> for I64Vec4 {
    type Output = Self;
    #[inline]
    fn shl(self, rhs: i8) -> Self::Output {
        Self {
            x: self.x.shl(rhs),
            y: self.y.shl(rhs),
            z: self.z.shl(rhs),
            w: self.w.shl(rhs),
        }
    }
}

impl Shr<i8> for I64Vec4 {
    type Output = Self;
    #[inline]
    fn shr(self, rhs: i8) -> Self::Output {
        Self {
            x: self.x.shr(rhs),
            y: self.y.shr(rhs),
            z: self.z.shr(rhs),
            w: self.w.shr(rhs),
        }
    }
}

impl Shl<i16> for I64Vec4 {
    type Output = Self;
    #[inline]
    fn shl(self, rhs: i16) -> Self::Output {
        Self {
            x: self.x.shl(rhs),
            y: self.y.shl(rhs),
            z: self.z.shl(rhs),
            w: self.w.shl(rhs),
        }
    }
}

impl Shr<i16> for I64Vec4 {
    type Output = Self;
    #[inline]
    fn shr(self, rhs: i16) -> Self::Output {
        Self {
            x: self.x.shr(rhs),
            y: self.y.shr(rhs),
            z: self.z.shr(rhs),
            w: self.w.shr(rhs),
        }
    }
}

impl Shl<i32> for I64Vec4 {
    type Output = Self;
    #[inline]
    fn shl(self, rhs: i32) -> Self::Output {
        Self {
            x: self.x.shl(rhs),
            y: self.y.shl(rhs),
            z: self.z.shl(rhs),
            w: self.w.shl(rhs),
        }
    }
}

impl Shr<i32> for I64Vec4 {
    type Output = Self;
    #[inline]
    fn shr(self, rhs: i32) -> Self::Output {
        Self {
            x: self.x.shr(rhs),
            y: self.y.shr(rhs),
            z: self.z.shr(rhs),
            w: self.w.shr(rhs),
        }
    }
}

impl Shl<i64> for I64Vec4 {
    type Output = Self;
    #[inline]
    fn shl(self, rhs: i64) -> Self::Output {
        Self {
            x: self.x.shl(rhs),
            y: self.y.shl(rhs),
            z: self.z.shl(rhs),
            w: self.w.shl(rhs),
        }
    }
}

impl Shr<i64> for I64Vec4 {
    type Output = Self;
    #[inline]
    fn shr(self, rhs: i64) -> Self::Output {
        Self {
            x: self.x.shr(rhs),
            y: self.y.shr(rhs),
            z: self.z.shr(rhs),
            w: self.w.shr(rhs),
        }
    }
}
1711
1712impl Shl<u8> for I64Vec4 {
1713 type Output = Self;
1714 #[inline]
1715 fn shl(self, rhs: u8) -> Self::Output {
1716 Self {
1717 x: self.x.shl(rhs),
1718 y: self.y.shl(rhs),
1719 z: self.z.shl(rhs),
1720 w: self.w.shl(rhs),
1721 }
1722 }
1723}
1724
1725impl Shr<u8> for I64Vec4 {
1726 type Output = Self;
1727 #[inline]
1728 fn shr(self, rhs: u8) -> Self::Output {
1729 Self {
1730 x: self.x.shr(rhs),
1731 y: self.y.shr(rhs),
1732 z: self.z.shr(rhs),
1733 w: self.w.shr(rhs),
1734 }
1735 }
1736}
1737
1738impl Shl<u16> for I64Vec4 {
1739 type Output = Self;
1740 #[inline]
1741 fn shl(self, rhs: u16) -> Self::Output {
1742 Self {
1743 x: self.x.shl(rhs),
1744 y: self.y.shl(rhs),
1745 z: self.z.shl(rhs),
1746 w: self.w.shl(rhs),
1747 }
1748 }
1749}
1750
1751impl Shr<u16> for I64Vec4 {
1752 type Output = Self;
1753 #[inline]
1754 fn shr(self, rhs: u16) -> Self::Output {
1755 Self {
1756 x: self.x.shr(rhs),
1757 y: self.y.shr(rhs),
1758 z: self.z.shr(rhs),
1759 w: self.w.shr(rhs),
1760 }
1761 }
1762}
1763
1764impl Shl<u32> for I64Vec4 {
1765 type Output = Self;
1766 #[inline]
1767 fn shl(self, rhs: u32) -> Self::Output {
1768 Self {
1769 x: self.x.shl(rhs),
1770 y: self.y.shl(rhs),
1771 z: self.z.shl(rhs),
1772 w: self.w.shl(rhs),
1773 }
1774 }
1775}
1776
1777impl Shr<u32> for I64Vec4 {
1778 type Output = Self;
1779 #[inline]
1780 fn shr(self, rhs: u32) -> Self::Output {
1781 Self {
1782 x: self.x.shr(rhs),
1783 y: self.y.shr(rhs),
1784 z: self.z.shr(rhs),
1785 w: self.w.shr(rhs),
1786 }
1787 }
1788}
1789
1790impl Shl<u64> for I64Vec4 {
1791 type Output = Self;
1792 #[inline]
1793 fn shl(self, rhs: u64) -> Self::Output {
1794 Self {
1795 x: self.x.shl(rhs),
1796 y: self.y.shl(rhs),
1797 z: self.z.shl(rhs),
1798 w: self.w.shl(rhs),
1799 }
1800 }
1801}
1802
1803impl Shr<u64> for I64Vec4 {
1804 type Output = Self;
1805 #[inline]
1806 fn shr(self, rhs: u64) -> Self::Output {
1807 Self {
1808 x: self.x.shr(rhs),
1809 y: self.y.shr(rhs),
1810 z: self.z.shr(rhs),
1811 w: self.w.shr(rhs),
1812 }
1813 }
1814}
1815
1816impl Shl<crate::IVec4> for I64Vec4 {
1817 type Output = Self;
1818 #[inline]
1819 fn shl(self, rhs: crate::IVec4) -> Self::Output {
1820 Self {
1821 x: self.x.shl(rhs.x),
1822 y: self.y.shl(rhs.y),
1823 z: self.z.shl(rhs.z),
1824 w: self.w.shl(rhs.w),
1825 }
1826 }
1827}
1828
1829impl Shr<crate::IVec4> for I64Vec4 {
1830 type Output = Self;
1831 #[inline]
1832 fn shr(self, rhs: crate::IVec4) -> Self::Output {
1833 Self {
1834 x: self.x.shr(rhs.x),
1835 y: self.y.shr(rhs.y),
1836 z: self.z.shr(rhs.z),
1837 w: self.w.shr(rhs.w),
1838 }
1839 }
1840}
1841
1842impl Shl<crate::UVec4> for I64Vec4 {
1843 type Output = Self;
1844 #[inline]
1845 fn shl(self, rhs: crate::UVec4) -> Self::Output {
1846 Self {
1847 x: self.x.shl(rhs.x),
1848 y: self.y.shl(rhs.y),
1849 z: self.z.shl(rhs.z),
1850 w: self.w.shl(rhs.w),
1851 }
1852 }
1853}
1854
1855impl Shr<crate::UVec4> for I64Vec4 {
1856 type Output = Self;
1857 #[inline]
1858 fn shr(self, rhs: crate::UVec4) -> Self::Output {
1859 Self {
1860 x: self.x.shr(rhs.x),
1861 y: self.y.shr(rhs.y),
1862 z: self.z.shr(rhs.z),
1863 w: self.w.shr(rhs.w),
1864 }
1865 }
1866}
1867
impl Index<usize> for I64Vec4 {
    type Output = i64;
    /// Returns a reference to the component at `index` (0 = x, 1 = y, 2 = z, 3 = w).
    ///
    /// # Panics
    ///
    /// Panics if `index` is greater than 3.
    #[inline]
    fn index(&self, index: usize) -> &Self::Output {
        match index {
            0 => &self.x,
            1 => &self.y,
            2 => &self.z,
            3 => &self.w,
            _ => panic!("index out of bounds"),
        }
    }
}
1881
impl IndexMut<usize> for I64Vec4 {
    /// Returns a mutable reference to the component at `index` (0 = x, 1 = y, 2 = z, 3 = w).
    ///
    /// # Panics
    ///
    /// Panics if `index` is greater than 3.
    #[inline]
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        match index {
            0 => &mut self.x,
            1 => &mut self.y,
            2 => &mut self.z,
            3 => &mut self.w,
            _ => panic!("index out of bounds"),
        }
    }
}
1894
impl fmt::Display for I64Vec4 {
    /// Formats the vector as `[x, y, z, w]`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "[{}, {}, {}, {}]", self.x, self.y, self.z, self.w)
    }
}
1900
impl fmt::Debug for I64Vec4 {
    /// Formats the vector as a tuple struct, e.g. `I64Vec4(1, 2, 3, 4)`.
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt.debug_tuple(stringify!(I64Vec4))
            .field(&self.x)
            .field(&self.y)
            .field(&self.z)
            .field(&self.w)
            .finish()
    }
}
1911
1912impl From<[i64; 4]> for I64Vec4 {
1913 #[inline]
1914 fn from(a: [i64; 4]) -> Self {
1915 Self::new(a[0], a[1], a[2], a[3])
1916 }
1917}
1918
1919impl From<I64Vec4> for [i64; 4] {
1920 #[inline]
1921 fn from(v: I64Vec4) -> Self {
1922 [v.x, v.y, v.z, v.w]
1923 }
1924}
1925
1926impl From<(i64, i64, i64, i64)> for I64Vec4 {
1927 #[inline]
1928 fn from(t: (i64, i64, i64, i64)) -> Self {
1929 Self::new(t.0, t.1, t.2, t.3)
1930 }
1931}
1932
1933impl From<I64Vec4> for (i64, i64, i64, i64) {
1934 #[inline]
1935 fn from(v: I64Vec4) -> Self {
1936 (v.x, v.y, v.z, v.w)
1937 }
1938}
1939
1940impl From<(I64Vec3, i64)> for I64Vec4 {
1941 #[inline]
1942 fn from((v, w): (I64Vec3, i64)) -> Self {
1943 Self::new(v.x, v.y, v.z, w)
1944 }
1945}
1946
1947impl From<(i64, I64Vec3)> for I64Vec4 {
1948 #[inline]
1949 fn from((x, v): (i64, I64Vec3)) -> Self {
1950 Self::new(x, v.x, v.y, v.z)
1951 }
1952}
1953
1954impl From<(I64Vec2, i64, i64)> for I64Vec4 {
1955 #[inline]
1956 fn from((v, z, w): (I64Vec2, i64, i64)) -> Self {
1957 Self::new(v.x, v.y, z, w)
1958 }
1959}
1960
1961impl From<(I64Vec2, I64Vec2)> for I64Vec4 {
1962 #[inline]
1963 fn from((v, u): (I64Vec2, I64Vec2)) -> Self {
1964 Self::new(v.x, v.y, u.x, u.y)
1965 }
1966}
1967
1968impl From<I8Vec4> for I64Vec4 {
1969 #[inline]
1970 fn from(v: I8Vec4) -> Self {
1971 Self::new(
1972 i64::from(v.x),
1973 i64::from(v.y),
1974 i64::from(v.z),
1975 i64::from(v.w),
1976 )
1977 }
1978}
1979
1980impl From<U8Vec4> for I64Vec4 {
1981 #[inline]
1982 fn from(v: U8Vec4) -> Self {
1983 Self::new(
1984 i64::from(v.x),
1985 i64::from(v.y),
1986 i64::from(v.z),
1987 i64::from(v.w),
1988 )
1989 }
1990}
1991
1992impl From<I16Vec4> for I64Vec4 {
1993 #[inline]
1994 fn from(v: I16Vec4) -> Self {
1995 Self::new(
1996 i64::from(v.x),
1997 i64::from(v.y),
1998 i64::from(v.z),
1999 i64::from(v.w),
2000 )
2001 }
2002}
2003
2004impl From<U16Vec4> for I64Vec4 {
2005 #[inline]
2006 fn from(v: U16Vec4) -> Self {
2007 Self::new(
2008 i64::from(v.x),
2009 i64::from(v.y),
2010 i64::from(v.z),
2011 i64::from(v.w),
2012 )
2013 }
2014}
2015
2016impl From<IVec4> for I64Vec4 {
2017 #[inline]
2018 fn from(v: IVec4) -> Self {
2019 Self::new(
2020 i64::from(v.x),
2021 i64::from(v.y),
2022 i64::from(v.z),
2023 i64::from(v.w),
2024 )
2025 }
2026}
2027
2028impl From<UVec4> for I64Vec4 {
2029 #[inline]
2030 fn from(v: UVec4) -> Self {
2031 Self::new(
2032 i64::from(v.x),
2033 i64::from(v.y),
2034 i64::from(v.z),
2035 i64::from(v.w),
2036 )
2037 }
2038}
2039
2040impl TryFrom<U64Vec4> for I64Vec4 {
2041 type Error = core::num::TryFromIntError;
2042
2043 #[inline]
2044 fn try_from(v: U64Vec4) -> Result<Self, Self::Error> {
2045 Ok(Self::new(
2046 i64::try_from(v.x)?,
2047 i64::try_from(v.y)?,
2048 i64::try_from(v.z)?,
2049 i64::try_from(v.w)?,
2050 ))
2051 }
2052}
2053
2054impl From<BVec4> for I64Vec4 {
2055 #[inline]
2056 fn from(v: BVec4) -> Self {
2057 Self::new(
2058 i64::from(v.x),
2059 i64::from(v.y),
2060 i64::from(v.z),
2061 i64::from(v.w),
2062 )
2063 }
2064}
2065
2066#[cfg(not(feature = "scalar-math"))]
2067
2068impl From<BVec4A> for I64Vec4 {
2069 #[inline]
2070 fn from(v: BVec4A) -> Self {
2071 let bool_array: [bool; 4] = v.into();
2072 Self::new(
2073 i64::from(bool_array[0]),
2074 i64::from(bool_array[1]),
2075 i64::from(bool_array[2]),
2076 i64::from(bool_array[3]),
2077 )
2078 }
2079}