#![cfg_attr(feature = "rkyv-serialize", allow(unsafe_op_in_unsafe_fn))]

use num::{One, Zero};

use approx::{AbsDiffEq, RelativeEq, UlpsEq};
use std::any::TypeId;
use std::cmp::Ordering;
use std::fmt;
use std::hash::{Hash, Hasher};
use std::marker::PhantomData;
use std::mem;

#[cfg(feature = "serde-serialize-no-std")]
use serde::{Deserialize, Deserializer, Serialize, Serializer};

#[cfg(feature = "rkyv-serialize-no-std")]
use super::rkyv_wrappers::CustomPhantom;
#[cfg(feature = "rkyv-serialize")]
use rkyv::bytecheck;
#[cfg(feature = "rkyv-serialize-no-std")]
use rkyv::{Archive, Archived, with::With};

use simba::scalar::{ClosedAddAssign, ClosedMulAssign, ClosedSubAssign, Field, SupersetOf};
use simba::simd::SimdPartialOrd;

use crate::base::allocator::{Allocator, SameShapeAllocator, SameShapeC, SameShapeR};
use crate::base::constraint::{DimEq, SameNumberOfColumns, SameNumberOfRows, ShapeConstraint};
use crate::base::dimension::{Dim, DimAdd, DimSum, IsNotStaticOne, U1, U2, U3};
use crate::base::iter::{
    ColumnIter, ColumnIterMut, MatrixIter, MatrixIterMut, RowIter, RowIterMut,
};
use crate::base::storage::{Owned, RawStorage, RawStorageMut, SameShapeStorage};
use crate::base::{Const, DefaultAllocator, OMatrix, OVector, Scalar, Unit};
use crate::{ArrayStorage, SMatrix, SimdComplexField, Storage, UninitMatrix};

use crate::storage::IsContiguous;
use crate::uninit::{Init, InitStatus, Uninit};
#[cfg(any(feature = "std", feature = "alloc"))]
use crate::{DMatrix, DVector, Dyn, RowDVector, VecStorage};
use std::mem::MaybeUninit;

/// A square matrix with `D` rows and `D` columns.
pub type SquareMatrix<T, D, S> = Matrix<T, D, D, S>;

/// A matrix with one column and `D` rows.
pub type Vector<T, D, S> = Matrix<T, D, U1, S>;

/// A matrix with one row and `D` columns.
pub type RowVector<T, D, S> = Matrix<T, U1, D, S>;

/// The type of the result of a matrix sum.
pub type MatrixSum<T, R1, C1, R2, C2> =
    Matrix<T, SameShapeR<R1, R2>, SameShapeC<C1, C2>, SameShapeStorage<T, R1, C1, R2, C2>>;

/// The type of the result of a vector sum.
pub type VectorSum<T, R1, R2> =
    Matrix<T, SameShapeR<R1, R2>, U1, SameShapeStorage<T, R1, U1, R2, U1>>;

/// The type of the result of a matrix cross product.
pub type MatrixCross<T, R1, C1, R2, C2> =
    Matrix<T, SameShapeR<R1, R2>, SameShapeC<C1, C2>, SameShapeStorage<T, R1, C1, R2, C2>>;

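/// The core matrix (and vector) type of this crate, generic over the element type `T`, the
/// row and column dimensions `R` and `C`, and the storage `S` backing the elements.
///
/// A small usage sketch with illustrative values (assuming the crate's `Matrix2x3` alias):
/// ```
/// use nalgebra::Matrix2x3;
/// let m = Matrix2x3::new(1.0, 2.0, 3.0,
///                        4.0, 5.0, 6.0);
/// assert_eq!(m.shape(), (2, 3));
/// assert_eq!(m[(1, 2)], 6.0);
/// ```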
64#[repr(C)]
163#[derive(Clone, Copy)]
164#[cfg_attr(
165 feature = "rkyv-serialize-no-std",
166 derive(Archive, rkyv::Serialize, rkyv::Deserialize),
167 archive(
168 as = "Matrix<T::Archived, R, C, S::Archived>",
169 bound(archive = "
170 T: Archive,
171 S: Archive,
172 With<PhantomData<(T, R, C)>, CustomPhantom<(Archived<T>, R, C)>>: Archive<Archived = PhantomData<(Archived<T>, R, C)>>
173 ")
174 )
175)]
176#[cfg_attr(feature = "rkyv-serialize", derive(bytecheck::CheckBytes))]
177#[cfg_attr(feature = "defmt", derive(defmt::Format))]
178pub struct Matrix<T, R, C, S> {
179 pub data: S,
203
204 #[cfg_attr(feature = "rkyv-serialize-no-std", with(CustomPhantom<(T::Archived, R, C)>))]
215 _phantoms: PhantomData<(T, R, C)>,
216}
217
218impl<T, R: Dim, C: Dim, S: fmt::Debug> fmt::Debug for Matrix<T, R, C, S> {
219 fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
220 self.data.fmt(formatter)
221 }
222}
223
224impl<T, R, C, S> Default for Matrix<T, R, C, S>
225where
226 T: Scalar,
227 R: Dim,
228 C: Dim,
229 S: Default,
230{
231 fn default() -> Self {
232 Matrix {
233 data: Default::default(),
234 _phantoms: PhantomData,
235 }
236 }
237}
238
239#[cfg(feature = "serde-serialize-no-std")]
240impl<T, R, C, S> Serialize for Matrix<T, R, C, S>
241where
242 T: Scalar,
243 R: Dim,
244 C: Dim,
245 S: Serialize,
246{
247 fn serialize<Ser>(&self, serializer: Ser) -> Result<Ser::Ok, Ser::Error>
248 where
249 Ser: Serializer,
250 {
251 self.data.serialize(serializer)
252 }
253}
254
255#[cfg(feature = "serde-serialize-no-std")]
256impl<'de, T, R, C, S> Deserialize<'de> for Matrix<T, R, C, S>
257where
258 T: Scalar,
259 R: Dim,
260 C: Dim,
261 S: Deserialize<'de>,
262{
263 fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
264 where
265 D: Deserializer<'de>,
266 {
267 S::deserialize(deserializer).map(|x| Matrix {
268 data: x,
269 _phantoms: PhantomData,
270 })
271 }
272}
273
274#[cfg(feature = "compare")]
275impl<T: Scalar, R: Dim, C: Dim, S: RawStorage<T, R, C>> matrixcompare_core::Matrix<T>
276 for Matrix<T, R, C, S>
277{
278 fn rows(&self) -> usize {
279 self.nrows()
280 }
281
282 fn cols(&self) -> usize {
283 self.ncols()
284 }
285
286 fn access(&self) -> matrixcompare_core::Access<'_, T> {
287 matrixcompare_core::Access::Dense(self)
288 }
289}
290
291#[cfg(feature = "compare")]
292impl<T: Scalar, R: Dim, C: Dim, S: RawStorage<T, R, C>> matrixcompare_core::DenseAccess<T>
293 for Matrix<T, R, C, S>
294{
295 fn fetch_single(&self, row: usize, col: usize) -> T {
296 self.index((row, col)).clone()
297 }
298}
299
300#[cfg(feature = "bytemuck")]
301unsafe impl<T: Scalar, R: Dim, C: Dim, S: RawStorage<T, R, C>> bytemuck::Zeroable
302 for Matrix<T, R, C, S>
303where
304 S: bytemuck::Zeroable,
305{
306}
307
308#[cfg(feature = "bytemuck")]
309unsafe impl<T: Scalar, R: Dim, C: Dim, S: RawStorage<T, R, C>> bytemuck::Pod for Matrix<T, R, C, S>
310where
311 S: bytemuck::Pod,
312 Self: Copy,
313{
314}
315
316impl<T, R, C, S> Matrix<T, R, C, S> {
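    /// Creates a new matrix from the given storage without checking that the storage's
    /// dimensions are consistent with the type-level dimensions `R` and `C`.
    ///
    /// # Safety
    ///
    /// The caller must ensure that `data` is a valid storage for a matrix with `R` rows and
    /// `C` columns; otherwise later unchecked accesses may read out of bounds.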
317 #[inline(always)]
324 pub const unsafe fn from_data_statically_unchecked(data: S) -> Matrix<T, R, C, S> {
325 Matrix {
326 data,
327 _phantoms: PhantomData,
328 }
329 }
330}
331
332impl<T, const R: usize, const C: usize> SMatrix<T, R, C> {
333 #[inline(always)]
338 pub const fn from_array_storage(storage: ArrayStorage<T, R, C>) -> Self {
339 unsafe { Self::from_data_statically_unchecked(storage) }
342 }
343}
344
345#[cfg(any(feature = "std", feature = "alloc"))]
348impl<T> DMatrix<T> {
349 pub const fn from_vec_storage(storage: VecStorage<T, Dyn, Dyn>) -> Self {
354 unsafe { Self::from_data_statically_unchecked(storage) }
357 }
358}
359
360#[cfg(any(feature = "std", feature = "alloc"))]
363impl<T> DVector<T> {
364 pub const fn from_vec_storage(storage: VecStorage<T, Dyn, U1>) -> Self {
369 unsafe { Self::from_data_statically_unchecked(storage) }
372 }
373}
374
375#[cfg(any(feature = "std", feature = "alloc"))]
378impl<T> RowDVector<T> {
379 pub const fn from_vec_storage(storage: VecStorage<T, U1, Dyn>) -> Self {
384 unsafe { Self::from_data_statically_unchecked(storage) }
387 }
388}
389
390impl<T: Scalar, R: Dim, C: Dim> UninitMatrix<T, R, C>
391where
392 DefaultAllocator: Allocator<R, C>,
393{
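    /// Converts this matrix of uninitialized entries into an ordinary `OMatrix`.
    ///
    /// # Safety
    ///
    /// Every entry must have been written before this is called; otherwise the resulting
    /// matrix exposes uninitialized memory.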
394 #[inline(always)]
400 pub unsafe fn assume_init(self) -> OMatrix<T, R, C> {
401 unsafe {
402 OMatrix::from_data(<DefaultAllocator as Allocator<R, C>>::assume_init(
403 self.data,
404 ))
405 }
406 }
407}
408
409impl<T, R: Dim, C: Dim, S: RawStorage<T, R, C>> Matrix<T, R, C, S> {
410 #[inline(always)]
412 pub const fn from_data(data: S) -> Self {
413 unsafe { Self::from_data_statically_unchecked(data) }
414 }
415
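    /// The shape of this matrix, returned as the tuple (number of rows, number of columns).
    ///
    /// A small usage sketch with illustrative values:
    /// ```
    /// use nalgebra::Matrix3x4;
    /// let m = Matrix3x4::<f64>::zeros();
    /// assert_eq!(m.shape(), (3, 4));
    /// ```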
416 #[inline]
425 #[must_use]
426 pub fn shape(&self) -> (usize, usize) {
427 let (nrows, ncols) = self.shape_generic();
428 (nrows.value(), ncols.value())
429 }
430
431 #[inline]
433 #[must_use]
434 pub fn shape_generic(&self) -> (R, C) {
435 self.data.shape()
436 }
437
438 #[inline]
447 #[must_use]
448 pub fn nrows(&self) -> usize {
449 self.shape().0
450 }
451
452 #[inline]
461 #[must_use]
462 pub fn ncols(&self) -> usize {
463 self.shape().1
464 }
465
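    /// The (row stride, column stride) of this matrix: the row stride is the offset, in buffer
    /// elements, between two entries of the same column in consecutive rows; the column stride
    /// is the offset between two entries of the same row in consecutive columns.
    ///
    /// A small sketch for a column-major, dynamically sized matrix:
    /// ```
    /// use nalgebra::DMatrix;
    /// let m = DMatrix::<f64>::zeros(5, 3);
    /// assert_eq!(m.strides(), (1, 5));
    /// ```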
466 #[inline]
477 #[must_use]
478 pub fn strides(&self) -> (usize, usize) {
479 let (srows, scols) = self.data.strides();
480 (srows.value(), scols.value())
481 }
482
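    /// Computes the (row, column) coordinates corresponding to a linear, column-major index,
    /// so that `m[i] == m[m.vector_to_matrix_index(i)]`.
    ///
    /// An illustrative example:
    /// ```
    /// use nalgebra::Matrix2;
    /// let m = Matrix2::new(1, 2,
    ///                      3, 4);
    /// let i = m.vector_to_matrix_index(3);
    /// assert_eq!(i, (1, 1));
    /// assert_eq!(m[i], m[3]);
    /// ```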
483 #[inline]
496 #[must_use]
497 pub fn vector_to_matrix_index(&self, i: usize) -> (usize, usize) {
498 let (nrows, ncols) = self.shape();
499
500 if nrows == 1 {
503 (0, i)
504 } else if ncols == 1 {
505 (i, 0)
506 } else {
507 (i % nrows, i / nrows)
508 }
509 }
510
511 #[inline]
525 #[must_use]
526 pub fn as_ptr(&self) -> *const T {
527 self.data.ptr()
528 }
529
530 #[inline]
534 #[must_use]
535 pub fn relative_eq<R2, C2, SB>(
536 &self,
537 other: &Matrix<T, R2, C2, SB>,
538 eps: T::Epsilon,
539 max_relative: T::Epsilon,
540 ) -> bool
541 where
542 T: RelativeEq + Scalar,
543 R2: Dim,
544 C2: Dim,
545 SB: Storage<T, R2, C2>,
546 T::Epsilon: Clone,
547 ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
548 {
549 assert!(self.shape() == other.shape());
550 self.iter()
551 .zip(other.iter())
552 .all(|(a, b)| a.relative_eq(b, eps.clone(), max_relative.clone()))
553 }
554
555 #[inline]
557 #[must_use]
558 #[allow(clippy::should_implement_trait)]
559 pub fn eq<R2, C2, SB>(&self, other: &Matrix<T, R2, C2, SB>) -> bool
560 where
561 T: PartialEq,
562 R2: Dim,
563 C2: Dim,
564 SB: RawStorage<T, R2, C2>,
565 ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
566 {
567 assert!(self.shape() == other.shape());
568 self.iter().zip(other.iter()).all(|(a, b)| *a == *b)
569 }
570
571 #[inline]
573 pub fn into_owned(self) -> OMatrix<T, R, C>
574 where
575 T: Scalar,
576 S: Storage<T, R, C>,
577 DefaultAllocator: Allocator<R, C>,
578 {
579 Matrix::from_data(self.data.into_owned())
580 }
581
582 #[inline]
587 pub fn into_owned_sum<R2, C2>(self) -> MatrixSum<T, R, C, R2, C2>
588 where
589 T: Scalar,
590 S: Storage<T, R, C>,
591 R2: Dim,
592 C2: Dim,
593 DefaultAllocator: SameShapeAllocator<R, C, R2, C2>,
594 ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
595 {
596 if TypeId::of::<SameShapeStorage<T, R, C, R2, C2>>() == TypeId::of::<Owned<T, R, C>>() {
597 unsafe {
600 let owned = self.into_owned();
602 let res = mem::transmute_copy(&owned);
603 mem::forget(owned);
604 res
605 }
606 } else {
607 self.clone_owned_sum()
608 }
609 }
610
611 #[inline]
613 #[must_use]
614 pub fn clone_owned(&self) -> OMatrix<T, R, C>
615 where
616 T: Scalar,
617 S: Storage<T, R, C>,
618 DefaultAllocator: Allocator<R, C>,
619 {
620 Matrix::from_data(self.data.clone_owned())
621 }
622
623 #[inline]
626 #[must_use]
627 pub fn clone_owned_sum<R2, C2>(&self) -> MatrixSum<T, R, C, R2, C2>
628 where
629 T: Scalar,
630 S: Storage<T, R, C>,
631 R2: Dim,
632 C2: Dim,
633 DefaultAllocator: SameShapeAllocator<R, C, R2, C2>,
634 ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
635 {
636 let (nrows, ncols) = self.shape();
637 let nrows: SameShapeR<R, R2> = Dim::from_usize(nrows);
638 let ncols: SameShapeC<C, C2> = Dim::from_usize(ncols);
639
640 let mut res = Matrix::uninit(nrows, ncols);
641
642 unsafe {
643 for j in 0..res.ncols() {
645 for i in 0..res.nrows() {
646 *res.get_unchecked_mut((i, j)) =
647 MaybeUninit::new(self.get_unchecked((i, j)).clone());
648 }
649 }
650
651 res.assume_init()
653 }
654 }
655
656 #[inline]
658 fn transpose_to_uninit<Status, R2, C2, SB>(
659 &self,
660 _status: Status,
661 out: &mut Matrix<Status::Value, R2, C2, SB>,
662 ) where
663 Status: InitStatus<T>,
664 T: Scalar,
665 R2: Dim,
666 C2: Dim,
667 SB: RawStorageMut<Status::Value, R2, C2>,
668 ShapeConstraint: SameNumberOfRows<R, C2> + SameNumberOfColumns<C, R2>,
669 {
670 let (nrows, ncols) = self.shape();
671 assert!(
672 (ncols, nrows) == out.shape(),
673 "Incompatible shape for transposition."
674 );
675
676 for i in 0..nrows {
678 for j in 0..ncols {
679 unsafe {
681 Status::init(
682 out.get_unchecked_mut((j, i)),
683 self.get_unchecked((i, j)).clone(),
684 );
685 }
686 }
687 }
688 }
689
690 #[inline]
692 pub fn transpose_to<R2, C2, SB>(&self, out: &mut Matrix<T, R2, C2, SB>)
693 where
694 T: Scalar,
695 R2: Dim,
696 C2: Dim,
697 SB: RawStorageMut<T, R2, C2>,
698 ShapeConstraint: SameNumberOfRows<R, C2> + SameNumberOfColumns<C, R2>,
699 {
700 self.transpose_to_uninit(Init, out)
701 }
702
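    /// Returns the transpose of `self` as a newly allocated matrix.
    ///
    /// A small usage sketch with illustrative values:
    /// ```
    /// use nalgebra::{Matrix2x3, Matrix3x2};
    /// let m = Matrix2x3::new(1, 2, 3,
    ///                        4, 5, 6);
    /// let expected = Matrix3x2::new(1, 4,
    ///                               2, 5,
    ///                               3, 6);
    /// assert_eq!(m.transpose(), expected);
    /// ```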
703 #[inline]
705 #[must_use = "Did you mean to use transpose_mut()?"]
706 pub fn transpose(&self) -> OMatrix<T, C, R>
707 where
708 T: Scalar,
709 DefaultAllocator: Allocator<C, R>,
710 {
711 let (nrows, ncols) = self.shape_generic();
712
713 let mut res = Matrix::uninit(ncols, nrows);
714 self.transpose_to_uninit(Uninit, &mut res);
715 unsafe { res.assume_init() }
717 }
718}
719
720impl<T, R: Dim, C: Dim, S: RawStorage<T, R, C>> Matrix<T, R, C, S> {
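    /// Returns a matrix containing the result of `f` applied to each entry of this matrix.
    ///
    /// A small usage sketch with illustrative values:
    /// ```
    /// use nalgebra::Vector3;
    /// let v = Vector3::new(1.0, 2.0, 3.0);
    /// assert_eq!(v.map(|e| e * 2.0), Vector3::new(2.0, 4.0, 6.0));
    /// ```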
722 #[inline]
724 #[must_use]
725 pub fn map<T2: Scalar, F: FnMut(T) -> T2>(&self, mut f: F) -> OMatrix<T2, R, C>
726 where
727 T: Scalar,
728 DefaultAllocator: Allocator<R, C>,
729 {
730 let (nrows, ncols) = self.shape_generic();
731 let mut res = Matrix::uninit(nrows, ncols);
732
733 for j in 0..ncols.value() {
734 for i in 0..nrows.value() {
735 unsafe {
737 let a = self.data.get_unchecked(i, j).clone();
738 *res.data.get_unchecked_mut(i, j) = MaybeUninit::new(f(a));
739 }
740 }
741 }
742
743 unsafe { res.assume_init() }
745 }
746
747 pub fn cast<T2: Scalar>(self) -> OMatrix<T2, R, C>
757 where
758 T: Scalar,
759 OMatrix<T2, R, C>: SupersetOf<Self>,
760 DefaultAllocator: Allocator<R, C>,
761 {
762 crate::convert(self)
763 }
764
765 pub fn try_cast<T2: Scalar>(self) -> Option<OMatrix<T2, R, C>>
775 where
776 T: Scalar,
777 Self: SupersetOf<OMatrix<T2, R, C>>,
778 DefaultAllocator: Allocator<R, C>,
779 {
780 crate::try_convert(self)
781 }
782
783 #[inline]
791 #[must_use]
792 pub fn fold_with<T2>(
793 &self,
794 init_f: impl FnOnce(Option<&T>) -> T2,
795 f: impl FnMut(T2, &T) -> T2,
796 ) -> T2
797 where
798 T: Scalar,
799 {
800 let mut it = self.iter();
801 let init = init_f(it.next());
802 it.fold(init, f)
803 }
804
805 #[inline]
808 #[must_use]
809 pub fn map_with_location<T2: Scalar, F: FnMut(usize, usize, T) -> T2>(
810 &self,
811 mut f: F,
812 ) -> OMatrix<T2, R, C>
813 where
814 T: Scalar,
815 DefaultAllocator: Allocator<R, C>,
816 {
817 let (nrows, ncols) = self.shape_generic();
818 let mut res = Matrix::uninit(nrows, ncols);
819
820 for j in 0..ncols.value() {
821 for i in 0..nrows.value() {
822 unsafe {
824 let a = self.data.get_unchecked(i, j).clone();
825 *res.data.get_unchecked_mut(i, j) = MaybeUninit::new(f(i, j, a));
826 }
827 }
828 }
829
830 unsafe { res.assume_init() }
832 }
833
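    /// Returns a matrix containing the result of `f` applied pairwise to the entries of `self`
    /// and `rhs`, which must have the same shape.
    ///
    /// An illustrative example:
    /// ```
    /// use nalgebra::Vector3;
    /// let a = Vector3::new(1.0, 2.0, 3.0);
    /// let b = Vector3::new(10.0, 20.0, 30.0);
    /// assert_eq!(a.zip_map(&b, |x, y| x + y), Vector3::new(11.0, 22.0, 33.0));
    /// ```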
834 #[inline]
837 #[must_use]
838 pub fn zip_map<T2, N3, S2, F>(&self, rhs: &Matrix<T2, R, C, S2>, mut f: F) -> OMatrix<N3, R, C>
839 where
840 T: Scalar,
841 T2: Scalar,
842 N3: Scalar,
843 S2: RawStorage<T2, R, C>,
844 F: FnMut(T, T2) -> N3,
845 DefaultAllocator: Allocator<R, C>,
846 {
847 let (nrows, ncols) = self.shape_generic();
848 let mut res = Matrix::uninit(nrows, ncols);
849
850 assert_eq!(
851 (nrows.value(), ncols.value()),
852 rhs.shape(),
853 "Matrix simultaneous traversal error: dimension mismatch."
854 );
855
856 for j in 0..ncols.value() {
857 for i in 0..nrows.value() {
858 unsafe {
860 let a = self.data.get_unchecked(i, j).clone();
861 let b = rhs.data.get_unchecked(i, j).clone();
862 *res.data.get_unchecked_mut(i, j) = MaybeUninit::new(f(a, b))
863 }
864 }
865 }
866
867 unsafe { res.assume_init() }
869 }
870
871 #[inline]
874 #[must_use]
875 pub fn zip_zip_map<T2, N3, N4, S2, S3, F>(
876 &self,
877 b: &Matrix<T2, R, C, S2>,
878 c: &Matrix<N3, R, C, S3>,
879 mut f: F,
880 ) -> OMatrix<N4, R, C>
881 where
882 T: Scalar,
883 T2: Scalar,
884 N3: Scalar,
885 N4: Scalar,
886 S2: RawStorage<T2, R, C>,
887 S3: RawStorage<N3, R, C>,
888 F: FnMut(T, T2, N3) -> N4,
889 DefaultAllocator: Allocator<R, C>,
890 {
891 let (nrows, ncols) = self.shape_generic();
892 let mut res = Matrix::uninit(nrows, ncols);
893
894 assert_eq!(
895 (nrows.value(), ncols.value()),
896 b.shape(),
897 "Matrix simultaneous traversal error: dimension mismatch."
898 );
899 assert_eq!(
900 (nrows.value(), ncols.value()),
901 c.shape(),
902 "Matrix simultaneous traversal error: dimension mismatch."
903 );
904
905 for j in 0..ncols.value() {
906 for i in 0..nrows.value() {
907 unsafe {
909 let a = self.data.get_unchecked(i, j).clone();
910 let b = b.data.get_unchecked(i, j).clone();
911 let c = c.data.get_unchecked(i, j).clone();
912 *res.data.get_unchecked_mut(i, j) = MaybeUninit::new(f(a, b, c))
913 }
914 }
915 }
916
917 unsafe { res.assume_init() }
919 }
920
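    /// Folds `f` over every entry of this matrix, in column-major order, starting from `init`.
    ///
    /// A small usage sketch with illustrative values:
    /// ```
    /// use nalgebra::Matrix2;
    /// let m = Matrix2::new(1, 2,
    ///                      3, 4);
    /// assert_eq!(m.fold(0, |acc, e| acc + e), 10);
    /// ```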
921 #[inline]
923 #[must_use]
924 pub fn fold<Acc>(&self, init: Acc, mut f: impl FnMut(Acc, T) -> Acc) -> Acc
925 where
926 T: Scalar,
927 {
928 let (nrows, ncols) = self.shape_generic();
929
930 let mut res = init;
931
932 for j in 0..ncols.value() {
933 for i in 0..nrows.value() {
934 unsafe {
936 let a = self.data.get_unchecked(i, j).clone();
937 res = f(res, a)
938 }
939 }
940 }
941
942 res
943 }
944
945 #[inline]
947 #[must_use]
948 pub fn zip_fold<T2, R2, C2, S2, Acc>(
949 &self,
950 rhs: &Matrix<T2, R2, C2, S2>,
951 init: Acc,
952 mut f: impl FnMut(Acc, T, T2) -> Acc,
953 ) -> Acc
954 where
955 T: Scalar,
956 T2: Scalar,
957 R2: Dim,
958 C2: Dim,
959 S2: RawStorage<T2, R2, C2>,
960 ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
961 {
962 let (nrows, ncols) = self.shape_generic();
963
964 let mut res = init;
965
966 assert_eq!(
967 (nrows.value(), ncols.value()),
968 rhs.shape(),
969 "Matrix simultaneous traversal error: dimension mismatch."
970 );
971
972 for j in 0..ncols.value() {
973 for i in 0..nrows.value() {
974 unsafe {
975 let a = self.data.get_unchecked(i, j).clone();
976 let b = rhs.data.get_unchecked(i, j).clone();
977 res = f(res, a, b)
978 }
979 }
980 }
981
982 res
983 }
984
985 #[inline]
987 pub fn apply<F: FnMut(&mut T)>(&mut self, mut f: F)
988 where
989 S: RawStorageMut<T, R, C>,
990 {
991 let (nrows, ncols) = self.shape();
992
993 for j in 0..ncols {
994 for i in 0..nrows {
995 unsafe {
996 let e = self.data.get_unchecked_mut(i, j);
997 f(e)
998 }
999 }
1000 }
1001 }
1002
1003 #[inline]
1006 pub fn zip_apply<T2, R2, C2, S2>(
1007 &mut self,
1008 rhs: &Matrix<T2, R2, C2, S2>,
1009 mut f: impl FnMut(&mut T, T2),
1010 ) where
1011 S: RawStorageMut<T, R, C>,
1012 T2: Scalar,
1013 R2: Dim,
1014 C2: Dim,
1015 S2: RawStorage<T2, R2, C2>,
1016 ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
1017 {
1018 let (nrows, ncols) = self.shape();
1019
1020 assert_eq!(
1021 (nrows, ncols),
1022 rhs.shape(),
1023 "Matrix simultaneous traversal error: dimension mismatch."
1024 );
1025
1026 for j in 0..ncols {
1027 for i in 0..nrows {
1028 unsafe {
1029 let e = self.data.get_unchecked_mut(i, j);
1030 let rhs = rhs.get_unchecked((i, j)).clone();
1031 f(e, rhs)
1032 }
1033 }
1034 }
1035 }
1036
1037 #[inline]
1040 pub fn zip_zip_apply<T2, R2, C2, S2, N3, R3, C3, S3>(
1041 &mut self,
1042 b: &Matrix<T2, R2, C2, S2>,
1043 c: &Matrix<N3, R3, C3, S3>,
1044 mut f: impl FnMut(&mut T, T2, N3),
1045 ) where
1046 S: RawStorageMut<T, R, C>,
1047 T2: Scalar,
1048 R2: Dim,
1049 C2: Dim,
1050 S2: RawStorage<T2, R2, C2>,
1051 N3: Scalar,
1052 R3: Dim,
1053 C3: Dim,
1054 S3: RawStorage<N3, R3, C3>,
        ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
        ShapeConstraint: SameNumberOfRows<R, R3> + SameNumberOfColumns<C, C3>,
1057 {
1058 let (nrows, ncols) = self.shape();
1059
1060 assert_eq!(
1061 (nrows, ncols),
1062 b.shape(),
1063 "Matrix simultaneous traversal error: dimension mismatch."
1064 );
1065 assert_eq!(
1066 (nrows, ncols),
1067 c.shape(),
1068 "Matrix simultaneous traversal error: dimension mismatch."
1069 );
1070
1071 for j in 0..ncols {
1072 for i in 0..nrows {
1073 unsafe {
1074 let e = self.data.get_unchecked_mut(i, j);
1075 let b = b.get_unchecked((i, j)).clone();
1076 let c = c.get_unchecked((i, j)).clone();
1077 f(e, b, c)
1078 }
1079 }
1080 }
1081 }
1082}
1083
1084impl<T, R: Dim, C: Dim, S: RawStorage<T, R, C>> Matrix<T, R, C, S> {
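    /// Iterates through this matrix's entries in column-major order.
    ///
    /// An illustrative example:
    /// ```
    /// use nalgebra::Matrix2x3;
    /// let m = Matrix2x3::new(11, 12, 13,
    ///                        21, 22, 23);
    /// let collected: Vec<_> = m.iter().copied().collect();
    /// assert_eq!(collected, vec![11, 21, 12, 22, 13, 23]);
    /// ```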
1086 #[inline]
1103 pub fn iter(&self) -> MatrixIter<'_, T, R, C, S> {
1104 MatrixIter::new(&self.data)
1105 }
1106
1107 #[inline]
1119 pub const fn row_iter(&self) -> RowIter<'_, T, R, C, S> {
1120 RowIter::new(self)
1121 }
1122
1123 #[inline]
1135 pub fn column_iter(&self) -> ColumnIter<'_, T, R, C, S> {
1136 ColumnIter::new(self)
1137 }
1138
1139 #[inline]
1141 pub fn iter_mut(&mut self) -> MatrixIterMut<'_, T, R, C, S>
1142 where
1143 S: RawStorageMut<T, R, C>,
1144 {
1145 MatrixIterMut::new(&mut self.data)
1146 }
1147
1148 #[inline]
1164 pub const fn row_iter_mut(&mut self) -> RowIterMut<'_, T, R, C, S>
1165 where
1166 S: RawStorageMut<T, R, C>,
1167 {
1168 RowIterMut::new(self)
1169 }
1170
1171 #[inline]
1187 pub fn column_iter_mut(&mut self) -> ColumnIterMut<'_, T, R, C, S>
1188 where
1189 S: RawStorageMut<T, R, C>,
1190 {
1191 ColumnIterMut::new(self)
1192 }
1193}
1194
1195impl<T, R: Dim, C: Dim, S: RawStorageMut<T, R, C>> Matrix<T, R, C, S> {
1196 #[inline]
1201 pub fn as_mut_ptr(&mut self) -> *mut T {
1202 self.data.ptr_mut()
1203 }
1204
1205 #[inline]
1211 pub unsafe fn swap_unchecked(&mut self, row_cols1: (usize, usize), row_cols2: (usize, usize)) {
1212 unsafe {
1213 debug_assert!(row_cols1.0 < self.nrows() && row_cols1.1 < self.ncols());
1214 debug_assert!(row_cols2.0 < self.nrows() && row_cols2.1 < self.ncols());
1215 self.data.swap_unchecked(row_cols1, row_cols2)
1216 }
1217 }
1218
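    /// Swaps two entries, given by their (row, column) coordinates. Panics if either index is
    /// out of bounds.
    ///
    /// A small usage sketch with illustrative values:
    /// ```
    /// use nalgebra::Matrix2;
    /// let mut m = Matrix2::new(1, 2,
    ///                          3, 4);
    /// m.swap((0, 0), (1, 1));
    /// assert_eq!(m, Matrix2::new(4, 2,
    ///                            3, 1));
    /// ```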
1219 #[inline]
1221 pub fn swap(&mut self, row_cols1: (usize, usize), row_cols2: (usize, usize)) {
1222 let (nrows, ncols) = self.shape();
1223 assert!(
1224 row_cols1.0 < nrows && row_cols1.1 < ncols,
1225 "Matrix elements swap index out of bounds."
1226 );
1227 assert!(
1228 row_cols2.0 < nrows && row_cols2.1 < ncols,
1229 "Matrix elements swap index out of bounds."
1230 );
1231 unsafe { self.swap_unchecked(row_cols1, row_cols2) }
1232 }
1233
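    /// Fills this matrix with the contents of `slice`, interpreted in column-major order. The
    /// slice must contain exactly `nrows * ncols` elements.
    ///
    /// An illustrative example:
    /// ```
    /// use nalgebra::Matrix2;
    /// let mut m = Matrix2::zeros();
    /// m.copy_from_slice(&[1.0, 2.0, 3.0, 4.0]);
    /// // Column-major: the first two values fill the first column.
    /// assert_eq!(m, Matrix2::new(1.0, 3.0,
    ///                            2.0, 4.0));
    /// ```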
1234 #[inline]
1238 pub fn copy_from_slice(&mut self, slice: &[T])
1239 where
1240 T: Scalar,
1241 {
1242 let (nrows, ncols) = self.shape();
1243
1244 assert!(
1245 nrows * ncols == slice.len(),
1246 "The slice must contain the same number of elements as the matrix."
1247 );
1248
1249 for j in 0..ncols {
1250 for i in 0..nrows {
1251 unsafe {
1252 *self.get_unchecked_mut((i, j)) = slice.get_unchecked(i + j * nrows).clone();
1253 }
1254 }
1255 }
1256 }
1257
1258 #[inline]
1260 pub fn copy_from<R2, C2, SB>(&mut self, other: &Matrix<T, R2, C2, SB>)
1261 where
1262 T: Scalar,
1263 R2: Dim,
1264 C2: Dim,
1265 SB: RawStorage<T, R2, C2>,
1266 ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
1267 {
1268 assert!(
1269 self.shape() == other.shape(),
1270 "Unable to copy from a matrix with a different shape."
1271 );
1272
1273 for j in 0..self.ncols() {
1274 for i in 0..self.nrows() {
1275 unsafe {
1276 *self.get_unchecked_mut((i, j)) = other.get_unchecked((i, j)).clone();
1277 }
1278 }
1279 }
1280 }
1281
1282 #[inline]
1284 pub fn tr_copy_from<R2, C2, SB>(&mut self, other: &Matrix<T, R2, C2, SB>)
1285 where
1286 T: Scalar,
1287 R2: Dim,
1288 C2: Dim,
1289 SB: RawStorage<T, R2, C2>,
1290 ShapeConstraint: DimEq<R, C2> + SameNumberOfColumns<C, R2>,
1291 {
1292 let (nrows, ncols) = self.shape();
1293 assert!(
1294 (ncols, nrows) == other.shape(),
1295 "Unable to copy from a matrix with incompatible shape."
1296 );
1297
1298 for j in 0..ncols {
1299 for i in 0..nrows {
1300 unsafe {
1301 *self.get_unchecked_mut((i, j)) = other.get_unchecked((j, i)).clone();
1302 }
1303 }
1304 }
1305 }
1306
1307 #[inline]
1310 pub fn apply_into<F: FnMut(&mut T)>(mut self, f: F) -> Self {
1311 self.apply(f);
1312 self
1313 }
1314}
1315
1316impl<T, D: Dim, S: RawStorage<T, D>> Vector<T, D, S> {
1317 #[inline]
1321 #[must_use]
1322 pub unsafe fn vget_unchecked(&self, i: usize) -> &T {
1323 unsafe {
1324 debug_assert!(i < self.nrows(), "Vector index out of bounds.");
1325 let i = i * self.strides().0;
1326 self.data.get_unchecked_linear(i)
1327 }
1328 }
1329}
1330
1331impl<T, D: Dim, S: RawStorageMut<T, D>> Vector<T, D, S> {
1332 #[inline]
1336 #[must_use]
1337 pub unsafe fn vget_unchecked_mut(&mut self, i: usize) -> &mut T {
1338 unsafe {
1339 debug_assert!(i < self.nrows(), "Vector index out of bounds.");
1340 let i = i * self.strides().0;
1341 self.data.get_unchecked_linear_mut(i)
1342 }
1343 }
1344}
1345
1346impl<T, R: Dim, C: Dim, S: RawStorage<T, R, C> + IsContiguous> Matrix<T, R, C, S> {
1347 #[inline]
1349 #[must_use]
1350 pub fn as_slice(&self) -> &[T] {
1351 unsafe { self.data.as_slice_unchecked() }
1353 }
1354}
1355
1356impl<T, R: Dim, C: Dim, S: RawStorage<T, R, C> + IsContiguous> AsRef<[T]> for Matrix<T, R, C, S> {
1357 #[inline]
1359 fn as_ref(&self) -> &[T] {
1360 self.as_slice()
1361 }
1362}
1363
1364impl<T, R: Dim, C: Dim, S: RawStorageMut<T, R, C> + IsContiguous> Matrix<T, R, C, S> {
1365 #[inline]
1367 #[must_use]
1368 pub fn as_mut_slice(&mut self) -> &mut [T] {
1369 unsafe { self.data.as_mut_slice_unchecked() }
1371 }
1372}
1373
1374impl<T: Scalar, D: Dim, S: RawStorageMut<T, D, D>> Matrix<T, D, D, S> {
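    /// Transposes this square matrix in-place. Panics if the matrix is not square.
    ///
    /// A small usage sketch with illustrative values:
    /// ```
    /// use nalgebra::Matrix2;
    /// let mut m = Matrix2::new(1, 2,
    ///                          3, 4);
    /// m.transpose_mut();
    /// assert_eq!(m, Matrix2::new(1, 3,
    ///                            2, 4));
    /// ```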
1375 pub fn transpose_mut(&mut self) {
1377 assert!(
1378 self.is_square(),
1379 "Unable to transpose a non-square matrix in-place."
1380 );
1381
1382 let dim = self.shape().0;
1383
1384 for i in 1..dim {
1385 for j in 0..i {
1386 unsafe { self.swap_unchecked((i, j), (j, i)) }
1387 }
1388 }
1389 }
1390}
1391
1392impl<T: SimdComplexField, R: Dim, C: Dim, S: RawStorage<T, R, C>> Matrix<T, R, C, S> {
1393 #[inline]
1395 fn adjoint_to_uninit<Status, R2, C2, SB>(
1396 &self,
1397 _status: Status,
1398 out: &mut Matrix<Status::Value, R2, C2, SB>,
1399 ) where
1400 Status: InitStatus<T>,
1401 R2: Dim,
1402 C2: Dim,
1403 SB: RawStorageMut<Status::Value, R2, C2>,
1404 ShapeConstraint: SameNumberOfRows<R, C2> + SameNumberOfColumns<C, R2>,
1405 {
1406 let (nrows, ncols) = self.shape();
1407 assert!(
1408 (ncols, nrows) == out.shape(),
1409 "Incompatible shape for transpose-copy."
1410 );
1411
1412 for i in 0..nrows {
1414 for j in 0..ncols {
1415 unsafe {
1417 Status::init(
1418 out.get_unchecked_mut((j, i)),
1419 self.get_unchecked((i, j)).clone().simd_conjugate(),
1420 );
1421 }
1422 }
1423 }
1424 }
1425
1426 #[inline]
1428 pub fn adjoint_to<R2, C2, SB>(&self, out: &mut Matrix<T, R2, C2, SB>)
1429 where
1430 R2: Dim,
1431 C2: Dim,
1432 SB: RawStorageMut<T, R2, C2>,
1433 ShapeConstraint: SameNumberOfRows<R, C2> + SameNumberOfColumns<C, R2>,
1434 {
1435 self.adjoint_to_uninit(Init, out)
1436 }
1437
1438 #[inline]
1440 #[must_use = "Did you mean to use adjoint_mut()?"]
1441 pub fn adjoint(&self) -> OMatrix<T, C, R>
1442 where
1443 DefaultAllocator: Allocator<C, R>,
1444 {
1445 let (nrows, ncols) = self.shape_generic();
1446
1447 let mut res = Matrix::uninit(ncols, nrows);
1448 self.adjoint_to_uninit(Uninit, &mut res);
1449
1450 unsafe { res.assume_init() }
1452 }
1453
    #[deprecated(note = "Renamed to `self.adjoint_to(out)`.")]
1456 #[inline]
1457 pub fn conjugate_transpose_to<R2, C2, SB>(&self, out: &mut Matrix<T, R2, C2, SB>)
1458 where
1459 R2: Dim,
1460 C2: Dim,
1461 SB: RawStorageMut<T, R2, C2>,
1462 ShapeConstraint: SameNumberOfRows<R, C2> + SameNumberOfColumns<C, R2>,
1463 {
1464 self.adjoint_to(out)
1465 }
1466
    #[deprecated(note = "Renamed to `self.adjoint()`.")]
1469 #[inline]
1470 pub fn conjugate_transpose(&self) -> OMatrix<T, C, R>
1471 where
1472 DefaultAllocator: Allocator<C, R>,
1473 {
1474 self.adjoint()
1475 }
1476
1477 #[inline]
1479 #[must_use = "Did you mean to use conjugate_mut()?"]
1480 pub fn conjugate(&self) -> OMatrix<T, R, C>
1481 where
1482 DefaultAllocator: Allocator<R, C>,
1483 {
1484 self.map(|e| e.simd_conjugate())
1485 }
1486
1487 #[inline]
1489 #[must_use = "Did you mean to use unscale_mut()?"]
1490 pub fn unscale(&self, real: T::SimdRealField) -> OMatrix<T, R, C>
1491 where
1492 DefaultAllocator: Allocator<R, C>,
1493 {
1494 self.map(|e| e.simd_unscale(real.clone()))
1495 }
1496
1497 #[inline]
1499 #[must_use = "Did you mean to use scale_mut()?"]
1500 pub fn scale(&self, real: T::SimdRealField) -> OMatrix<T, R, C>
1501 where
1502 DefaultAllocator: Allocator<R, C>,
1503 {
1504 self.map(|e| e.simd_scale(real.clone()))
1505 }
1506}
1507
1508impl<T: SimdComplexField, R: Dim, C: Dim, S: RawStorageMut<T, R, C>> Matrix<T, R, C, S> {
1509 #[inline]
1511 pub fn conjugate_mut(&mut self) {
1512 self.apply(|e| *e = e.clone().simd_conjugate())
1513 }
1514
1515 #[inline]
1517 pub fn unscale_mut(&mut self, real: T::SimdRealField) {
1518 self.apply(|e| *e = e.clone().simd_unscale(real.clone()))
1519 }
1520
1521 #[inline]
1523 pub fn scale_mut(&mut self, real: T::SimdRealField) {
1524 self.apply(|e| *e = e.clone().simd_scale(real.clone()))
1525 }
1526}
1527
1528impl<T: SimdComplexField, D: Dim, S: RawStorageMut<T, D, D>> Matrix<T, D, D, S> {
1529 #[deprecated(note = "Renamed to `self.adjoint_mut()`.")]
1531 pub fn conjugate_transform_mut(&mut self) {
1532 self.adjoint_mut()
1533 }
1534
1535 pub fn adjoint_mut(&mut self) {
1537 assert!(
1538 self.is_square(),
1539 "Unable to transpose a non-square matrix in-place."
1540 );
1541
1542 let dim = self.shape().0;
1543
1544 for i in 0..dim {
1545 for j in 0..i {
1546 unsafe {
1547 let ref_ij = self.get_unchecked((i, j)).clone();
1548 let ref_ji = self.get_unchecked((j, i)).clone();
1549 let conj_ij = ref_ij.simd_conjugate();
1550 let conj_ji = ref_ji.simd_conjugate();
1551 *self.get_unchecked_mut((i, j)) = conj_ji;
1552 *self.get_unchecked_mut((j, i)) = conj_ij;
1553 }
1554 }
1555
1556 {
1557 let diag = unsafe { self.get_unchecked_mut((i, i)) };
1558 *diag = diag.clone().simd_conjugate();
1559 }
1560 }
1561 }
1562}
1563
1564impl<T: Scalar, D: Dim, S: RawStorage<T, D, D>> SquareMatrix<T, D, S> {
1565 #[inline]
1567 #[must_use]
1568 pub fn diagonal(&self) -> OVector<T, D>
1569 where
1570 DefaultAllocator: Allocator<D>,
1571 {
1572 self.map_diagonal(|e| e)
1573 }
1574
1575 #[must_use]
1580 pub fn map_diagonal<T2: Scalar>(&self, mut f: impl FnMut(T) -> T2) -> OVector<T2, D>
1581 where
1582 DefaultAllocator: Allocator<D>,
1583 {
1584 assert!(
1585 self.is_square(),
1586 "Unable to get the diagonal of a non-square matrix."
1587 );
1588
1589 let dim = self.shape_generic().0;
1590 let mut res = Matrix::uninit(dim, Const::<1>);
1591
1592 for i in 0..dim.value() {
1593 unsafe {
1595 *res.vget_unchecked_mut(i) =
1596 MaybeUninit::new(f(self.get_unchecked((i, i)).clone()));
1597 }
1598 }
1599
1600 unsafe { res.assume_init() }
1602 }
1603
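    /// The trace of this square matrix, i.e., the sum of its diagonal entries.
    ///
    /// A small usage sketch with illustrative values:
    /// ```
    /// use nalgebra::Matrix2;
    /// let m = Matrix2::new(1.0, 2.0,
    ///                      3.0, 4.0);
    /// assert_eq!(m.trace(), 5.0);
    /// ```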
1604 #[inline]
1606 #[must_use]
1607 pub fn trace(&self) -> T
1608 where
1609 T: Scalar + Zero + ClosedAddAssign,
1610 {
1611 assert!(
1612 self.is_square(),
1613 "Cannot compute the trace of non-square matrix."
1614 );
1615
1616 let dim = self.shape_generic().0;
1617 let mut res = T::zero();
1618
1619 for i in 0..dim.value() {
1620 res += unsafe { self.get_unchecked((i, i)).clone() };
1621 }
1622
1623 res
1624 }
1625}
1626
1627impl<T: SimdComplexField, D: Dim, S: Storage<T, D, D>> SquareMatrix<T, D, S> {
1628 #[inline]
1630 #[must_use]
1631 pub fn symmetric_part(&self) -> OMatrix<T, D, D>
1632 where
1633 DefaultAllocator: Allocator<D, D>,
1634 {
1635 assert!(
1636 self.is_square(),
1637 "Cannot compute the symmetric part of a non-square matrix."
1638 );
1639 let mut tr = self.transpose();
1640 tr += self;
1641 tr *= crate::convert::<_, T>(0.5);
1642 tr
1643 }
1644
1645 #[inline]
1647 #[must_use]
1648 pub fn hermitian_part(&self) -> OMatrix<T, D, D>
1649 where
1650 DefaultAllocator: Allocator<D, D>,
1651 {
1652 assert!(
1653 self.is_square(),
1654 "Cannot compute the hermitian part of a non-square matrix."
1655 );
1656
1657 let mut tr = self.adjoint();
1658 tr += self;
1659 tr *= crate::convert::<_, T>(0.5);
1660 tr
1661 }
1662}
1663
1664impl<T: Scalar + Zero + One, D: DimAdd<U1> + IsNotStaticOne, S: RawStorage<T, D, D>>
1665 Matrix<T, D, D, S>
1666{
1667 #[inline]
1670 #[must_use]
1671 pub fn to_homogeneous(&self) -> OMatrix<T, DimSum<D, U1>, DimSum<D, U1>>
1672 where
1673 DefaultAllocator: Allocator<DimSum<D, U1>, DimSum<D, U1>>,
1674 {
1675 assert!(
1676 self.is_square(),
1677 "Only square matrices can currently be transformed to homogeneous coordinates."
1678 );
1679 let dim = DimSum::<D, U1>::from_usize(self.nrows() + 1);
1680 let mut res = OMatrix::identity_generic(dim, dim);
1681 res.generic_view_mut::<D, D>((0, 0), self.shape_generic())
1682 .copy_from(self);
1683 res
1684 }
1685}
1686
1687impl<T: Scalar + Zero, D: DimAdd<U1>, S: RawStorage<T, D>> Vector<T, D, S> {
1688 #[inline]
1691 #[must_use]
1692 pub fn to_homogeneous(&self) -> OVector<T, DimSum<D, U1>>
1693 where
1694 DefaultAllocator: Allocator<DimSum<D, U1>>,
1695 {
1696 self.push(T::zero())
1697 }
1698
1699 #[inline]
1702 pub fn from_homogeneous<SB>(v: Vector<T, DimSum<D, U1>, SB>) -> Option<OVector<T, D>>
1703 where
1704 SB: RawStorage<T, DimSum<D, U1>>,
1705 DefaultAllocator: Allocator<D>,
1706 {
1707 if v[v.len() - 1].is_zero() {
1708 let nrows = D::from_usize(v.len() - 1);
1709 Some(v.generic_view((0, 0), (nrows, Const::<1>)).into_owned())
1710 } else {
1711 None
1712 }
1713 }
1714}
1715
1716impl<T: Scalar, D: DimAdd<U1>, S: RawStorage<T, D>> Vector<T, D, S> {
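    /// Appends `element` at the end of this vector, returning a vector with one more row.
    ///
    /// An illustrative example:
    /// ```
    /// use nalgebra::{Vector3, Vector4};
    /// let v = Vector3::new(1.0, 2.0, 3.0);
    /// assert_eq!(v.push(4.0), Vector4::new(1.0, 2.0, 3.0, 4.0));
    /// ```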
1717 #[inline]
1719 #[must_use]
1720 pub fn push(&self, element: T) -> OVector<T, DimSum<D, U1>>
1721 where
1722 DefaultAllocator: Allocator<DimSum<D, U1>>,
1723 {
1724 let len = self.len();
1725 let hnrows = DimSum::<D, U1>::from_usize(len + 1);
1726 let mut res = Matrix::uninit(hnrows, Const::<1>);
1727 res.generic_view_mut((0, 0), self.shape_generic())
1730 .zip_apply(self, |out, e| *out = MaybeUninit::new(e));
1731 res[(len, 0)] = MaybeUninit::new(element);
1732
1733 unsafe { res.assume_init() }
1735 }
1736}
1737
1738impl<T, R: Dim, C: Dim, S> AbsDiffEq for Matrix<T, R, C, S>
1739where
1740 T: Scalar + AbsDiffEq,
1741 S: RawStorage<T, R, C>,
1742 T::Epsilon: Clone,
1743{
1744 type Epsilon = T::Epsilon;
1745
1746 #[inline]
1747 fn default_epsilon() -> Self::Epsilon {
1748 T::default_epsilon()
1749 }
1750
1751 #[inline]
1752 fn abs_diff_eq(&self, other: &Self, epsilon: Self::Epsilon) -> bool {
1753 self.iter()
1754 .zip(other.iter())
1755 .all(|(a, b)| a.abs_diff_eq(b, epsilon.clone()))
1756 }
1757}
1758
1759impl<T, R: Dim, C: Dim, S> RelativeEq for Matrix<T, R, C, S>
1760where
1761 T: Scalar + RelativeEq,
1762 S: Storage<T, R, C>,
1763 T::Epsilon: Clone,
1764{
1765 #[inline]
1766 fn default_max_relative() -> Self::Epsilon {
1767 T::default_max_relative()
1768 }
1769
1770 #[inline]
1771 fn relative_eq(
1772 &self,
1773 other: &Self,
1774 epsilon: Self::Epsilon,
1775 max_relative: Self::Epsilon,
1776 ) -> bool {
1777 self.relative_eq(other, epsilon, max_relative)
1778 }
1779}
1780
1781impl<T, R: Dim, C: Dim, S> UlpsEq for Matrix<T, R, C, S>
1782where
1783 T: Scalar + UlpsEq,
1784 S: RawStorage<T, R, C>,
1785 T::Epsilon: Clone,
1786{
1787 #[inline]
1788 fn default_max_ulps() -> u32 {
1789 T::default_max_ulps()
1790 }
1791
1792 #[inline]
1793 fn ulps_eq(&self, other: &Self, epsilon: Self::Epsilon, max_ulps: u32) -> bool {
1794 assert!(self.shape() == other.shape());
1795 self.iter()
1796 .zip(other.iter())
1797 .all(|(a, b)| a.ulps_eq(b, epsilon.clone(), max_ulps))
1798 }
1799}
1800
1801impl<T, R: Dim, C: Dim, S> PartialOrd for Matrix<T, R, C, S>
1802where
1803 T: Scalar + PartialOrd,
1804 S: RawStorage<T, R, C>,
1805{
1806 #[inline]
1807 fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
1808 if self.shape() != other.shape() {
1809 return None;
1810 }
1811
1812 if self.nrows() == 0 || self.ncols() == 0 {
1813 return Some(Ordering::Equal);
1814 }
1815
1816 let mut first_ord = unsafe {
1817 self.data
1818 .get_unchecked_linear(0)
1819 .partial_cmp(other.data.get_unchecked_linear(0))
1820 };
1821
1822 if let Some(first_ord) = first_ord.as_mut() {
1823 let mut it = self.iter().zip(other.iter());
            // Skip the first pair: it was already compared above.
            let _ = it.next();
            for (left, right) in it {
1827 if let Some(ord) = left.partial_cmp(right) {
1828 match ord {
1829 Ordering::Equal => { }
1830 Ordering::Less => {
1831 if *first_ord == Ordering::Greater {
1832 return None;
1833 }
1834 *first_ord = ord
1835 }
1836 Ordering::Greater => {
1837 if *first_ord == Ordering::Less {
1838 return None;
1839 }
1840 *first_ord = ord
1841 }
1842 }
1843 } else {
1844 return None;
1845 }
1846 }
1847 }
1848
1849 first_ord
1850 }
1851
1852 #[inline]
1853 fn lt(&self, right: &Self) -> bool {
1854 assert_eq!(
1855 self.shape(),
1856 right.shape(),
1857 "Matrix comparison error: dimensions mismatch."
1858 );
1859 self.iter().zip(right.iter()).all(|(a, b)| a.lt(b))
1860 }
1861
1862 #[inline]
1863 fn le(&self, right: &Self) -> bool {
1864 assert_eq!(
1865 self.shape(),
1866 right.shape(),
1867 "Matrix comparison error: dimensions mismatch."
1868 );
1869 self.iter().zip(right.iter()).all(|(a, b)| a.le(b))
1870 }
1871
1872 #[inline]
1873 fn gt(&self, right: &Self) -> bool {
1874 assert_eq!(
1875 self.shape(),
1876 right.shape(),
1877 "Matrix comparison error: dimensions mismatch."
1878 );
1879 self.iter().zip(right.iter()).all(|(a, b)| a.gt(b))
1880 }
1881
1882 #[inline]
1883 fn ge(&self, right: &Self) -> bool {
1884 assert_eq!(
1885 self.shape(),
1886 right.shape(),
1887 "Matrix comparison error: dimensions mismatch."
1888 );
1889 self.iter().zip(right.iter()).all(|(a, b)| a.ge(b))
1890 }
1891}
1892
1893impl<T, R: Dim, C: Dim, S> Eq for Matrix<T, R, C, S>
1894where
1895 T: Eq,
1896 S: RawStorage<T, R, C>,
1897{
1898}
1899
1900impl<T, R, R2, C, C2, S, S2> PartialEq<Matrix<T, R2, C2, S2>> for Matrix<T, R, C, S>
1901where
1902 T: PartialEq,
1903 C: Dim,
1904 C2: Dim,
1905 R: Dim,
1906 R2: Dim,
1907 S: RawStorage<T, R, C>,
1908 S2: RawStorage<T, R2, C2>,
1909{
1910 #[inline]
1911 fn eq(&self, right: &Matrix<T, R2, C2, S2>) -> bool {
1912 self.shape() == right.shape() && self.iter().zip(right.iter()).all(|(l, r)| l == r)
1913 }
1914}
1915
1916macro_rules! impl_fmt {
1917 ($trait: path, $fmt_str_without_precision: expr_2021, $fmt_str_with_precision: expr_2021) => {
1918 impl<T, R: Dim, C: Dim, S> $trait for Matrix<T, R, C, S>
1919 where
1920 T: Scalar + $trait,
1921 S: RawStorage<T, R, C>,
1922 {
1923 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1924 #[cfg(feature = "std")]
1925 fn val_width<T: Scalar + $trait>(val: &T, f: &mut fmt::Formatter<'_>) -> usize {
1926 match f.precision() {
1927 Some(precision) => format!($fmt_str_with_precision, val, precision)
1928 .chars()
1929 .count(),
1930 None => format!($fmt_str_without_precision, val).chars().count(),
1931 }
1932 }
1933
1934 #[cfg(not(feature = "std"))]
1935 fn val_width<T: Scalar + $trait>(_: &T, _: &mut fmt::Formatter<'_>) -> usize {
1936 4
1937 }
1938
1939 let (nrows, ncols) = self.shape();
1940
1941 if nrows == 0 || ncols == 0 {
1942 return write!(f, "[ ]");
1943 }
1944
1945 let mut max_length = 0;
1946
1947 for i in 0..nrows {
1948 for j in 0..ncols {
1949 max_length = crate::max(max_length, val_width(&self[(i, j)], f));
1950 }
1951 }
1952
1953 let max_length_with_space = max_length + 1;
1954
1955 writeln!(f)?;
1956 writeln!(
1957 f,
1958 " ┌ {:>width$} ┐",
1959 "",
1960 width = max_length_with_space * ncols - 1
1961 )?;
1962
1963 for i in 0..nrows {
1964 write!(f, " │")?;
1965 for j in 0..ncols {
1966 let number_length = val_width(&self[(i, j)], f) + 1;
1967 let pad = max_length_with_space - number_length;
1968 write!(f, " {:>thepad$}", "", thepad = pad)?;
1969 match f.precision() {
1970 Some(precision) => {
1971 write!(f, $fmt_str_with_precision, (*self)[(i, j)], precision)?
1972 }
1973 None => write!(f, $fmt_str_without_precision, (*self)[(i, j)])?,
1974 }
1975 }
1976 writeln!(f, " │")?;
1977 }
1978
1979 writeln!(
1980 f,
1981 " └ {:>width$} ┘",
1982 "",
1983 width = max_length_with_space * ncols - 1
1984 )?;
1985 writeln!(f)
1986 }
1987 }
1988 };
1989}
1990impl_fmt!(fmt::Display, "{}", "{:.1$}");
1991impl_fmt!(fmt::LowerExp, "{:e}", "{:.1$e}");
1992impl_fmt!(fmt::UpperExp, "{:E}", "{:.1$E}");
1993impl_fmt!(fmt::Octal, "{:o}", "{:1$o}");
1994impl_fmt!(fmt::LowerHex, "{:x}", "{:1$x}");
1995impl_fmt!(fmt::UpperHex, "{:X}", "{:1$X}");
1996impl_fmt!(fmt::Binary, "{:b}", "{:.1$b}");
1997impl_fmt!(fmt::Pointer, "{:p}", "{:.1$p}");
1998
1999#[cfg(test)]
2000mod tests {
2001 #[test]
2002 fn empty_display() {
2003 let vec: Vec<f64> = Vec::new();
2004 let dvector = crate::DVector::from_vec(vec);
2005 assert_eq!(format!("{}", dvector), "[ ]")
2006 }
2007
2008 #[test]
2009 fn lower_exp() {
2010 let test = crate::Matrix2::new(1e6, 2e5, 2e-5, 1.);
2011 assert_eq!(
2012 format!("{:e}", test),
2013 r"
2014 ┌ ┐
2015 │ 1e6 2e5 │
2016 │ 2e-5 1e0 │
2017 └ ┘
2018
2019"
2020 )
2021 }
2022}
2023
2024impl<
2026 T: Scalar + ClosedAddAssign + ClosedSubAssign + ClosedMulAssign,
2027 R: Dim,
2028 C: Dim,
2029 S: RawStorage<T, R, C>,
2030> Matrix<T, R, C, S>
2031{
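    /// The perpendicular product (2D analogue of the cross product) of two 2D column vectors,
    /// i.e., `self.x * b.y - self.y * b.x`.
    ///
    /// A small usage sketch with illustrative values:
    /// ```
    /// use nalgebra::Vector2;
    /// let a = Vector2::new(1.0, 2.0);
    /// let b = Vector2::new(3.0, 4.0);
    /// assert_eq!(a.perp(&b), -2.0);
    /// ```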
2032 #[inline]
2034 #[must_use]
2035 pub fn perp<R2, C2, SB>(&self, b: &Matrix<T, R2, C2, SB>) -> T
2036 where
2037 R2: Dim,
2038 C2: Dim,
2039 SB: RawStorage<T, R2, C2>,
2040 ShapeConstraint: SameNumberOfRows<R, U2>
2041 + SameNumberOfColumns<C, U1>
2042 + SameNumberOfRows<R2, U2>
2043 + SameNumberOfColumns<C2, U1>,
2044 {
2045 let shape = self.shape();
2046 assert_eq!(
2047 shape,
2048 b.shape(),
2049 "2D vector perpendicular product dimension mismatch."
2050 );
        assert_eq!(
            shape,
            (2, 1),
            "2D perpendicular product requires (2, 1) vectors, but found {shape:?}.",
        );
2056
2057 let ax = unsafe { self.get_unchecked((0, 0)).clone() };
2059 let ay = unsafe { self.get_unchecked((1, 0)).clone() };
2060 let bx = unsafe { b.get_unchecked((0, 0)).clone() };
2061 let by = unsafe { b.get_unchecked((1, 0)).clone() };
2062
2063 ax * by - ay * bx
2064 }
2065
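    /// The 3D cross product of two vectors. Both operands must be 3-dimensional column or row
    /// vectors of the same shape.
    ///
    /// A small usage sketch with illustrative values:
    /// ```
    /// use nalgebra::Vector3;
    /// let x = Vector3::new(1.0, 0.0, 0.0);
    /// let y = Vector3::new(0.0, 1.0, 0.0);
    /// assert_eq!(x.cross(&y), Vector3::new(0.0, 0.0, 1.0));
    /// ```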
2066 #[inline]
2072 #[must_use]
2073 pub fn cross<R2, C2, SB>(&self, b: &Matrix<T, R2, C2, SB>) -> MatrixCross<T, R, C, R2, C2>
2074 where
2075 R2: Dim,
2076 C2: Dim,
2077 SB: RawStorage<T, R2, C2>,
2078 DefaultAllocator: SameShapeAllocator<R, C, R2, C2>,
2079 ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
2080 {
2081 let shape = self.shape();
2082 assert_eq!(shape, b.shape(), "Vector cross product dimension mismatch.");
2083 assert!(
2084 shape == (3, 1) || shape == (1, 3),
2085 "Vector cross product dimension mismatch: must be (3, 1) or (1, 3) but found {shape:?}.",
2086 );
2087
2088 if shape.0 == 3 {
2089 unsafe {
2090 let mut res = Matrix::uninit(Dim::from_usize(3), Dim::from_usize(1));
2091
2092 let ax = self.get_unchecked((0, 0));
2093 let ay = self.get_unchecked((1, 0));
2094 let az = self.get_unchecked((2, 0));
2095
2096 let bx = b.get_unchecked((0, 0));
2097 let by = b.get_unchecked((1, 0));
2098 let bz = b.get_unchecked((2, 0));
2099
2100 *res.get_unchecked_mut((0, 0)) =
2101 MaybeUninit::new(ay.clone() * bz.clone() - az.clone() * by.clone());
2102 *res.get_unchecked_mut((1, 0)) =
2103 MaybeUninit::new(az.clone() * bx.clone() - ax.clone() * bz.clone());
2104 *res.get_unchecked_mut((2, 0)) =
2105 MaybeUninit::new(ax.clone() * by.clone() - ay.clone() * bx.clone());
2106
2107 res.assume_init()
2109 }
2110 } else {
2111 unsafe {
2112 let mut res = Matrix::uninit(Dim::from_usize(1), Dim::from_usize(3));
2113
2114 let ax = self.get_unchecked((0, 0));
2115 let ay = self.get_unchecked((0, 1));
2116 let az = self.get_unchecked((0, 2));
2117
2118 let bx = b.get_unchecked((0, 0));
2119 let by = b.get_unchecked((0, 1));
2120 let bz = b.get_unchecked((0, 2));
2121
2122 *res.get_unchecked_mut((0, 0)) =
2123 MaybeUninit::new(ay.clone() * bz.clone() - az.clone() * by.clone());
2124 *res.get_unchecked_mut((0, 1)) =
2125 MaybeUninit::new(az.clone() * bx.clone() - ax.clone() * bz.clone());
2126 *res.get_unchecked_mut((0, 2)) =
2127 MaybeUninit::new(ax.clone() * by.clone() - ay.clone() * bx.clone());
2128
2129 res.assume_init()
2131 }
2132 }
2133 }
2134}
2135
2136impl<T: Scalar + Field, S: RawStorage<T, U3>> Vector<T, U3, S> {
2137 #[inline]
2139 #[must_use]
2140 pub fn cross_matrix(&self) -> OMatrix<T, U3, U3> {
2141 OMatrix::<T, U3, U3>::new(
2142 T::zero(),
2143 -self[2].clone(),
2144 self[1].clone(),
2145 self[2].clone(),
2146 T::zero(),
2147 -self[0].clone(),
2148 -self[1].clone(),
2149 self[0].clone(),
2150 T::zero(),
2151 )
2152 }
2153}
2154
2155impl<T: SimdComplexField, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
2156 #[inline]
2158 #[must_use]
2159 pub fn angle<R2: Dim, C2: Dim, SB>(&self, other: &Matrix<T, R2, C2, SB>) -> T::SimdRealField
2160 where
2161 SB: Storage<T, R2, C2>,
2162 ShapeConstraint: DimEq<R, R2> + DimEq<C, C2>,
2163 {
2164 let prod = self.dotc(other);
2165 let n1 = self.norm();
2166 let n2 = other.norm();
2167
2168 if n1.is_zero() || n2.is_zero() {
2169 T::SimdRealField::zero()
2170 } else {
2171 let cang = prod.simd_real() / (n1 * n2);
2172 cang.simd_clamp(-T::SimdRealField::one(), T::SimdRealField::one())
2173 .simd_acos()
2174 }
2175 }
2176}
2177
2178impl<T, R: Dim, C: Dim, S> AbsDiffEq for Unit<Matrix<T, R, C, S>>
2179where
2180 T: Scalar + AbsDiffEq,
2181 S: RawStorage<T, R, C>,
2182 T::Epsilon: Clone,
2183{
2184 type Epsilon = T::Epsilon;
2185
2186 #[inline]
2187 fn default_epsilon() -> Self::Epsilon {
2188 T::default_epsilon()
2189 }
2190
2191 #[inline]
2192 fn abs_diff_eq(&self, other: &Self, epsilon: Self::Epsilon) -> bool {
2193 self.as_ref().abs_diff_eq(other.as_ref(), epsilon)
2194 }
2195}
2196
2197impl<T, R: Dim, C: Dim, S> RelativeEq for Unit<Matrix<T, R, C, S>>
2198where
2199 T: Scalar + RelativeEq,
2200 S: Storage<T, R, C>,
2201 T::Epsilon: Clone,
2202{
2203 #[inline]
2204 fn default_max_relative() -> Self::Epsilon {
2205 T::default_max_relative()
2206 }
2207
2208 #[inline]
2209 fn relative_eq(
2210 &self,
2211 other: &Self,
2212 epsilon: Self::Epsilon,
2213 max_relative: Self::Epsilon,
2214 ) -> bool {
2215 self.as_ref()
2216 .relative_eq(other.as_ref(), epsilon, max_relative)
2217 }
2218}
2219
2220impl<T, R: Dim, C: Dim, S> UlpsEq for Unit<Matrix<T, R, C, S>>
2221where
2222 T: Scalar + UlpsEq,
2223 S: RawStorage<T, R, C>,
2224 T::Epsilon: Clone,
2225{
2226 #[inline]
2227 fn default_max_ulps() -> u32 {
2228 T::default_max_ulps()
2229 }
2230
2231 #[inline]
2232 fn ulps_eq(&self, other: &Self, epsilon: Self::Epsilon, max_ulps: u32) -> bool {
2233 self.as_ref().ulps_eq(other.as_ref(), epsilon, max_ulps)
2234 }
2235}
2236
2237impl<T, R, C, S> Hash for Matrix<T, R, C, S>
2238where
2239 T: Scalar + Hash,
2240 R: Dim,
2241 C: Dim,
2242 S: RawStorage<T, R, C>,
2243{
2244 fn hash<H: Hasher>(&self, state: &mut H) {
2245 let (nrows, ncols) = self.shape();
2246 (nrows, ncols).hash(state);
2247
2248 for j in 0..ncols {
2249 for i in 0..nrows {
2250 unsafe {
2251 self.get_unchecked((i, j)).hash(state);
2252 }
2253 }
2254 }
2255 }
2256}
2257
2258impl<T, D, S> Unit<Vector<T, D, S>>
2259where
2260 T: Scalar,
2261 D: Dim,
2262 S: RawStorage<T, D, U1>,
2263{
2264 pub fn cast<T2: Scalar>(self) -> Unit<OVector<T2, D>>
2274 where
2275 T: Scalar,
2276 OVector<T2, D>: SupersetOf<Vector<T, D, S>>,
2277 DefaultAllocator: Allocator<D, U1>,
2278 {
2279 Unit::new_unchecked(crate::convert_ref(self.as_ref()))
2280 }
2281}
2282
2283impl<T, S> Matrix<T, U1, U1, S>
2284where
2285 S: RawStorage<T, U1, U1>,
2286{
2287 pub fn as_scalar(&self) -> &T {
2305 &self[(0, 0)]
2306 }
2307 pub fn as_scalar_mut(&mut self) -> &mut T
2327 where
2328 S: RawStorageMut<T, U1>,
2329 {
2330 &mut self[(0, 0)]
2331 }
2332 pub fn to_scalar(&self) -> T
2350 where
2351 T: Clone,
2352 {
2353 self.as_scalar().clone()
2354 }
2355}
2356
2357impl<T> super::alias::Matrix1<T> {
2358 pub fn into_scalar(self) -> T {
2377 let [[scalar]] = self.data.0;
2378 scalar
2379 }
2380}