1use num::{One, Zero};
2
3use approx::{AbsDiffEq, RelativeEq, UlpsEq};
4use std::any::TypeId;
5use std::cmp::Ordering;
6use std::fmt;
7use std::hash::{Hash, Hasher};
8use std::marker::PhantomData;
9use std::mem;
10
11#[cfg(feature = "serde-serialize-no-std")]
12use serde::{Deserialize, Deserializer, Serialize, Serializer};
13
14#[cfg(feature = "rkyv-serialize-no-std")]
15use super::rkyv_wrappers::CustomPhantom;
16#[cfg(feature = "rkyv-serialize")]
17use rkyv::bytecheck;
18#[cfg(feature = "rkyv-serialize-no-std")]
19use rkyv::{with::With, Archive, Archived};
20
21use simba::scalar::{ClosedAddAssign, ClosedMulAssign, ClosedSubAssign, Field, SupersetOf};
22use simba::simd::SimdPartialOrd;
23
24use crate::base::allocator::{Allocator, SameShapeAllocator, SameShapeC, SameShapeR};
25use crate::base::constraint::{DimEq, SameNumberOfColumns, SameNumberOfRows, ShapeConstraint};
26use crate::base::dimension::{Dim, DimAdd, DimSum, IsNotStaticOne, U1, U2, U3};
27use crate::base::iter::{
28 ColumnIter, ColumnIterMut, MatrixIter, MatrixIterMut, RowIter, RowIterMut,
29};
30use crate::base::storage::{Owned, RawStorage, RawStorageMut, SameShapeStorage};
31use crate::base::{Const, DefaultAllocator, OMatrix, OVector, Scalar, Unit};
32use crate::{ArrayStorage, SMatrix, SimdComplexField, Storage, UninitMatrix};
33
34use crate::storage::IsContiguous;
35use crate::uninit::{Init, InitStatus, Uninit};
36#[cfg(any(feature = "std", feature = "alloc"))]
37use crate::{DMatrix, DVector, Dyn, RowDVector, VecStorage};
38use std::mem::MaybeUninit;
39
/// An alias for a matrix with the same (type-level) number of rows and columns.
pub type SquareMatrix<T, D, S> = Matrix<T, D, D, S>;

/// A matrix with one column (dimension `D × 1`), i.e., a column vector.
pub type Vector<T, D, S> = Matrix<T, D, U1, S>;

/// A matrix with one row (dimension `1 × D`), i.e., a row vector.
pub type RowVector<T, D, S> = Matrix<T, U1, D, S>;

/// The result type of a matrix sum: its shape is the reconciliation
/// ("same shape") of the two operands' row and column dimensions.
pub type MatrixSum<T, R1, C1, R2, C2> =
    Matrix<T, SameShapeR<R1, R2>, SameShapeC<C1, C2>, SameShapeStorage<T, R1, C1, R2, C2>>;

/// The result type of a column-vector sum (a `U1`-column specialization of `MatrixSum`).
pub type VectorSum<T, R1, R2> =
    Matrix<T, SameShapeR<R1, R2>, U1, SameShapeStorage<T, R1, U1, R2, U1>>;

/// The result type of a matrix cross product; uses the same shape
/// reconciliation as `MatrixSum`.
pub type MatrixCross<T, R1, C1, R2, C2> =
    Matrix<T, SameShapeR<R1, R2>, SameShapeC<C1, C2>, SameShapeStorage<T, R1, C1, R2, C2>>;
/// The most generic, column-major matrix (and vector) type.
///
/// Parameterized by:
/// - `T`: the scalar component type;
/// - `R`, `C`: type-level row/column dimensions;
/// - `S`: the data storage that actually holds the components.
///
/// `#[repr(C)]` guarantees the layout is exactly that of the storage `S`
/// (the phantom marker is zero-sized).
#[repr(C)]
#[derive(Clone, Copy)]
#[cfg_attr(
    feature = "rkyv-serialize-no-std",
    derive(Archive, rkyv::Serialize, rkyv::Deserialize),
    archive(
        as = "Matrix<T::Archived, R, C, S::Archived>",
        bound(archive = "
        T: Archive,
        S: Archive,
        With<PhantomData<(T, R, C)>, CustomPhantom<(Archived<T>, R, C)>>: Archive<Archived = PhantomData<(Archived<T>, R, C)>>
    ")
    )
)]
#[cfg_attr(feature = "rkyv-serialize", derive(bytecheck::CheckBytes))]
pub struct Matrix<T, R, C, S> {
    /// The data storage that contains all the matrix components.
    pub data: S,

    // Zero-sized marker tying the unused type parameters `T`, `R`, `C` to the struct.
    #[cfg_attr(feature = "rkyv-serialize-no-std", with(CustomPhantom<(T::Archived, R, C)>))]
    _phantoms: PhantomData<(T, R, C)>,
}
213
214impl<T, R: Dim, C: Dim, S: fmt::Debug> fmt::Debug for Matrix<T, R, C, S> {
215 fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
216 self.data.fmt(formatter)
217 }
218}
219
220impl<T, R, C, S> Default for Matrix<T, R, C, S>
221where
222 T: Scalar,
223 R: Dim,
224 C: Dim,
225 S: Default,
226{
227 fn default() -> Self {
228 Matrix {
229 data: Default::default(),
230 _phantoms: PhantomData,
231 }
232 }
233}
234
#[cfg(feature = "serde-serialize-no-std")]
impl<T, R, C, S> Serialize for Matrix<T, R, C, S>
where
    T: Scalar,
    R: Dim,
    C: Dim,
    S: Serialize,
{
    /// Serializes only the storage; the phantom marker carries no data.
    fn serialize<Ser>(&self, serializer: Ser) -> Result<Ser::Ok, Ser::Error>
    where
        Ser: Serializer,
    {
        Serialize::serialize(&self.data, serializer)
    }
}
250
#[cfg(feature = "serde-serialize-no-std")]
impl<'de, T, R, C, S> Deserialize<'de> for Matrix<T, R, C, S>
where
    T: Scalar,
    R: Dim,
    C: Dim,
    S: Deserialize<'de>,
{
    /// Deserializes the storage, then wraps it into a matrix.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let data = S::deserialize(deserializer)?;
        Ok(Matrix {
            data,
            _phantoms: PhantomData,
        })
    }
}
269
#[cfg(feature = "compare")]
impl<T: Scalar, R: Dim, C: Dim, S: RawStorage<T, R, C>> matrixcompare_core::Matrix<T>
    for Matrix<T, R, C, S>
{
    /// Row count, taken from the runtime shape.
    fn rows(&self) -> usize {
        self.shape().0
    }

    /// Column count, taken from the runtime shape.
    fn cols(&self) -> usize {
        self.shape().1
    }

    /// Exposes this matrix to `matrixcompare` as a dense matrix.
    fn access(&self) -> matrixcompare_core::Access<'_, T> {
        matrixcompare_core::Access::Dense(self)
    }
}
286
#[cfg(feature = "compare")]
impl<T: Scalar, R: Dim, C: Dim, S: RawStorage<T, R, C>> matrixcompare_core::DenseAccess<T>
    for Matrix<T, R, C, S>
{
    /// Returns a clone of the element at `(row, col)` (panics when out of bounds).
    fn fetch_single(&self, row: usize, col: usize) -> T {
        let element = self.index((row, col));
        element.clone()
    }
}
295
#[cfg(feature = "bytemuck")]
// SAFETY: `Matrix` is `#[repr(C)]` with a single non-zero-sized field `data: S`;
// it is all-zeroes-valid whenever the storage `S` is, which the bound requires.
unsafe impl<T: Scalar, R: Dim, C: Dim, S: RawStorage<T, R, C>> bytemuck::Zeroable
    for Matrix<T, R, C, S>
where
    S: bytemuck::Zeroable,
{
}
303
#[cfg(feature = "bytemuck")]
// SAFETY: `Matrix` is `#[repr(C)]`, its only payload is `data: S` (the marker is
// zero-sized), so it is plain-old-data exactly when `S` is; `Self: Copy` is also required.
unsafe impl<T: Scalar, R: Dim, C: Dim, S: RawStorage<T, R, C>> bytemuck::Pod for Matrix<T, R, C, S>
where
    S: bytemuck::Pod,
    Self: Copy,
{
}
311
impl<T, R, C, S> Matrix<T, R, C, S> {
    /// Creates a new matrix with the given data without statically checking that
    /// the storage's dimensions match the type-level dimensions `R × C`.
    ///
    /// # Safety
    /// The caller must guarantee that `data` is a valid storage for a matrix with
    /// dimensions `R × C`; no check whatsoever is performed here.
    #[inline(always)]
    pub const unsafe fn from_data_statically_unchecked(data: S) -> Matrix<T, R, C, S> {
        Matrix {
            data,
            _phantoms: PhantomData,
        }
    }
}
327
impl<T, const R: usize, const C: usize> SMatrix<T, R, C> {
    /// Creates a new statically-sized matrix from the given `ArrayStorage`.
    ///
    /// Usable in `const` contexts.
    #[inline(always)]
    pub const fn from_array_storage(storage: ArrayStorage<T, R, C>) -> Self {
        // SAFETY: `ArrayStorage<T, R, C>` has dimensions `R × C` by construction.
        unsafe { Self::from_data_statically_unchecked(storage) }
    }
}
340
#[cfg(any(feature = "std", feature = "alloc"))]
impl<T> DMatrix<T> {
    /// Creates a new dynamically-sized matrix from the given `VecStorage`.
    ///
    /// Usable in `const` contexts.
    pub const fn from_vec_storage(storage: VecStorage<T, Dyn, Dyn>) -> Self {
        // SAFETY: the storage carries its own `Dyn × Dyn` runtime dimensions,
        // which match this matrix type's dimensions by construction.
        unsafe { Self::from_data_statically_unchecked(storage) }
    }
}
355
#[cfg(any(feature = "std", feature = "alloc"))]
impl<T> DVector<T> {
    /// Creates a new dynamically-sized column vector from the given `VecStorage`.
    ///
    /// Usable in `const` contexts.
    pub const fn from_vec_storage(storage: VecStorage<T, Dyn, U1>) -> Self {
        // SAFETY: the storage's `Dyn × U1` dimensions match this vector type.
        unsafe { Self::from_data_statically_unchecked(storage) }
    }
}
370
#[cfg(any(feature = "std", feature = "alloc"))]
impl<T> RowDVector<T> {
    /// Creates a new dynamically-sized row vector from the given `VecStorage`.
    ///
    /// Usable in `const` contexts.
    pub const fn from_vec_storage(storage: VecStorage<T, U1, Dyn>) -> Self {
        // SAFETY: the storage's `U1 × Dyn` dimensions match this row-vector type.
        unsafe { Self::from_data_statically_unchecked(storage) }
    }
}
385
impl<T: Scalar, R: Dim, C: Dim> UninitMatrix<T, R, C>
where
    DefaultAllocator: Allocator<R, C>,
{
    /// Assumes the uninitialized matrix's entries to be fully initialized,
    /// returning the initialized matrix.
    ///
    /// # Safety
    /// The caller must make sure that every entry of the matrix has actually
    /// been written to; reading uninitialized memory is undefined behavior.
    #[inline(always)]
    pub unsafe fn assume_init(self) -> OMatrix<T, R, C> {
        OMatrix::from_data(<DefaultAllocator as Allocator<R, C>>::assume_init(
            self.data,
        ))
    }
}
402
impl<T, R: Dim, C: Dim, S: RawStorage<T, R, C>> Matrix<T, R, C, S> {
    /// Creates a new matrix with the given data.
    #[inline(always)]
    pub fn from_data(data: S) -> Self {
        // SAFETY: the `RawStorage<T, R, C>` bound on `S` ties the storage to the
        // type-level dimensions `R × C`, so the unchecked constructor is sound here.
        unsafe { Self::from_data_statically_unchecked(data) }
    }

    /// The shape of this matrix as the tuple `(number of rows, number of columns)`.
    #[inline]
    #[must_use]
    pub fn shape(&self) -> (usize, usize) {
        let (nrows, ncols) = self.shape_generic();
        (nrows.value(), ncols.value())
    }

    /// The shape of this matrix expressed as type-level dimension values.
    #[inline]
    #[must_use]
    pub fn shape_generic(&self) -> (R, C) {
        self.data.shape()
    }

    /// The number of rows of this matrix.
    #[inline]
    #[must_use]
    pub fn nrows(&self) -> usize {
        self.shape().0
    }

    /// The number of columns of this matrix.
    #[inline]
    #[must_use]
    pub fn ncols(&self) -> usize {
        self.shape().1
    }

    /// The strides (in number of buffer elements) of the rows and columns of
    /// this matrix's storage.
    #[inline]
    #[must_use]
    pub fn strides(&self) -> (usize, usize) {
        let (srows, scols) = self.data.strides();
        (srows.value(), scols.value())
    }

    /// Computes the `(row, column)` coordinates of the `i`-th element of this
    /// matrix seen as a vector traversed in column-major order.
    #[inline]
    #[must_use]
    pub fn vector_to_matrix_index(&self, i: usize) -> (usize, usize) {
        let (nrows, ncols) = self.shape();

        // Dedicated branches for row/column vectors so any linear index maps
        // directly to the single varying coordinate.
        if nrows == 1 {
            (0, i)
        } else if ncols == 1 {
            (i, 0)
        } else {
            (i % nrows, i / nrows)
        }
    }

    /// Returns a raw pointer to the start of this matrix's component buffer.
    ///
    /// If the matrix is empty, the pointer may be dangling and must not be dereferenced.
    #[inline]
    #[must_use]
    pub fn as_ptr(&self) -> *const T {
        self.data.ptr()
    }

    /// Tests whether `self` and `other` are component-wise equal up to the given
    /// absolute (`eps`) and relative (`max_relative`) tolerances.
    ///
    /// # Panics
    /// Panics if the two matrices have different shapes.
    #[inline]
    #[must_use]
    pub fn relative_eq<R2, C2, SB>(
        &self,
        other: &Matrix<T, R2, C2, SB>,
        eps: T::Epsilon,
        max_relative: T::Epsilon,
    ) -> bool
    where
        T: RelativeEq + Scalar,
        R2: Dim,
        C2: Dim,
        SB: Storage<T, R2, C2>,
        T::Epsilon: Clone,
        ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
    {
        assert!(self.shape() == other.shape());
        self.iter()
            .zip(other.iter())
            .all(|(a, b)| a.relative_eq(b, eps.clone(), max_relative.clone()))
    }

    /// Tests whether `self` and `other` are exactly equal, component-wise.
    ///
    /// # Panics
    /// Panics if the two matrices have different shapes.
    #[inline]
    #[must_use]
    #[allow(clippy::should_implement_trait)]
    pub fn eq<R2, C2, SB>(&self, other: &Matrix<T, R2, C2, SB>) -> bool
    where
        T: PartialEq,
        R2: Dim,
        C2: Dim,
        SB: RawStorage<T, R2, C2>,
        ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
    {
        assert!(self.shape() == other.shape());
        self.iter().zip(other.iter()).all(|(a, b)| *a == *b)
    }

    /// Moves this matrix into one that owns its data.
    #[inline]
    pub fn into_owned(self) -> OMatrix<T, R, C>
    where
        T: Scalar,
        S: Storage<T, R, C>,
        DefaultAllocator: Allocator<R, C>,
    {
        Matrix::from_data(self.data.into_owned())
    }

    /// Moves this matrix into one that owns its data, with the storage type of
    /// the sum of `self` and a matrix with dimensions `(R2, C2)`.
    #[inline]
    pub fn into_owned_sum<R2, C2>(self) -> MatrixSum<T, R, C, R2, C2>
    where
        T: Scalar,
        S: Storage<T, R, C>,
        R2: Dim,
        C2: Dim,
        DefaultAllocator: SameShapeAllocator<R, C, R2, C2>,
        ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
    {
        if TypeId::of::<SameShapeStorage<T, R, C, R2, C2>>() == TypeId::of::<Owned<T, R, C>>() {
            // SAFETY: the `TypeId` equality just checked proves that the source
            // and destination storage types are the same type, so this
            // `transmute_copy` is an identity move; `forget` prevents a double drop.
            unsafe {
                let owned = self.into_owned();
                let res = mem::transmute_copy(&owned);
                mem::forget(owned);
                res
            }
        } else {
            // Different storage types: fall back to an element-wise copy.
            self.clone_owned_sum()
        }
    }

    /// Clones this matrix into one that owns its data.
    #[inline]
    #[must_use]
    pub fn clone_owned(&self) -> OMatrix<T, R, C>
    where
        T: Scalar,
        S: Storage<T, R, C>,
        DefaultAllocator: Allocator<R, C>,
    {
        Matrix::from_data(self.data.clone_owned())
    }

    /// Clones this matrix into one that owns its data, with the storage type of
    /// the sum of `self` and a matrix with dimensions `(R2, C2)`.
    #[inline]
    #[must_use]
    pub fn clone_owned_sum<R2, C2>(&self) -> MatrixSum<T, R, C, R2, C2>
    where
        T: Scalar,
        S: Storage<T, R, C>,
        R2: Dim,
        C2: Dim,
        DefaultAllocator: SameShapeAllocator<R, C, R2, C2>,
        ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
    {
        let (nrows, ncols) = self.shape();
        let nrows: SameShapeR<R, R2> = Dim::from_usize(nrows);
        let ncols: SameShapeC<C, C2> = Dim::from_usize(ncols);

        let mut res = Matrix::uninit(nrows, ncols);

        unsafe {
            // Column-major copy into the uninitialized result.
            // SAFETY: `res` was allocated with exactly `self`'s shape, so every
            // `(i, j)` is in-bounds for both matrices.
            for j in 0..res.ncols() {
                for i in 0..res.nrows() {
                    *res.get_unchecked_mut((i, j)) =
                        MaybeUninit::new(self.get_unchecked((i, j)).clone());
                }
            }

            // SAFETY: the double loop above initialized every entry.
            res.assume_init()
        }
    }

    /// Transposes `self` into `out`, initializing entries according to `Status`
    /// (shared implementation for initialized and uninitialized outputs).
    #[inline]
    fn transpose_to_uninit<Status, R2, C2, SB>(
        &self,
        _status: Status,
        out: &mut Matrix<Status::Value, R2, C2, SB>,
    ) where
        Status: InitStatus<T>,
        T: Scalar,
        R2: Dim,
        C2: Dim,
        SB: RawStorageMut<Status::Value, R2, C2>,
        ShapeConstraint: SameNumberOfRows<R, C2> + SameNumberOfColumns<C, R2>,
    {
        let (nrows, ncols) = self.shape();
        assert!(
            (ncols, nrows) == out.shape(),
            "Incompatible shape for transposition."
        );

        for i in 0..nrows {
            for j in 0..ncols {
                // SAFETY: the assert above guarantees `(j, i)` is in-bounds for
                // `out` whenever `(i, j)` is in-bounds for `self`.
                unsafe {
                    Status::init(
                        out.get_unchecked_mut((j, i)),
                        self.get_unchecked((i, j)).clone(),
                    );
                }
            }
        }
    }

    /// Transposes `self` and stores the result into `out`.
    ///
    /// # Panics
    /// Panics if `out`'s shape is not the transpose of `self`'s shape.
    #[inline]
    pub fn transpose_to<R2, C2, SB>(&self, out: &mut Matrix<T, R2, C2, SB>)
    where
        T: Scalar,
        R2: Dim,
        C2: Dim,
        SB: RawStorageMut<T, R2, C2>,
        ShapeConstraint: SameNumberOfRows<R, C2> + SameNumberOfColumns<C, R2>,
    {
        self.transpose_to_uninit(Init, out)
    }

    /// Returns the transpose of `self` as a newly allocated matrix.
    #[inline]
    #[must_use = "Did you mean to use transpose_mut()?"]
    pub fn transpose(&self) -> OMatrix<T, C, R>
    where
        T: Scalar,
        DefaultAllocator: Allocator<C, R>,
    {
        let (nrows, ncols) = self.shape_generic();

        let mut res = Matrix::uninit(ncols, nrows);
        self.transpose_to_uninit(Uninit, &mut res);
        // SAFETY: `transpose_to_uninit` wrote every entry of `res`.
        unsafe { res.assume_init() }
    }
}
713
impl<T, R: Dim, C: Dim, S: RawStorage<T, R, C>> Matrix<T, R, C, S> {
    /// Returns a matrix containing the result of `f` applied to each entry.
    #[inline]
    #[must_use]
    pub fn map<T2: Scalar, F: FnMut(T) -> T2>(&self, mut f: F) -> OMatrix<T2, R, C>
    where
        T: Scalar,
        DefaultAllocator: Allocator<R, C>,
    {
        let (nrows, ncols) = self.shape_generic();
        let mut res = Matrix::uninit(nrows, ncols);

        for j in 0..ncols.value() {
            for i in 0..nrows.value() {
                // SAFETY: `res` has `self`'s shape, so `(i, j)` is in-bounds for both.
                unsafe {
                    let a = self.data.get_unchecked(i, j).clone();
                    *res.data.get_unchecked_mut(i, j) = MaybeUninit::new(f(a));
                }
            }
        }

        // SAFETY: the double loop above initialized every entry of `res`.
        unsafe { res.assume_init() }
    }

    /// Casts the components of `self` to another scalar type.
    pub fn cast<T2: Scalar>(self) -> OMatrix<T2, R, C>
    where
        T: Scalar,
        OMatrix<T2, R, C>: SupersetOf<Self>,
        DefaultAllocator: Allocator<R, C>,
    {
        crate::convert(self)
    }

    /// Attempts to cast the components of `self` to another scalar type,
    /// returning `None` if the conversion fails.
    pub fn try_cast<T2: Scalar>(self) -> Option<OMatrix<T2, R, C>>
    where
        T: Scalar,
        Self: SupersetOf<OMatrix<T2, R, C>>,
        DefaultAllocator: Allocator<R, C>,
    {
        crate::try_convert(self)
    }

    /// Folds over the entries (column-major order), producing the initial
    /// accumulator from the first entry via `init_f` (which receives `None`
    /// when the matrix is empty).
    #[inline]
    #[must_use]
    pub fn fold_with<T2>(
        &self,
        init_f: impl FnOnce(Option<&T>) -> T2,
        f: impl FnMut(T2, &T) -> T2,
    ) -> T2
    where
        T: Scalar,
    {
        let mut it = self.iter();
        let init = init_f(it.next());
        it.fold(init, f)
    }

    /// Like [`Self::map`], but `f` also receives the `(row, column)` coordinates
    /// of each entry.
    #[inline]
    #[must_use]
    pub fn map_with_location<T2: Scalar, F: FnMut(usize, usize, T) -> T2>(
        &self,
        mut f: F,
    ) -> OMatrix<T2, R, C>
    where
        T: Scalar,
        DefaultAllocator: Allocator<R, C>,
    {
        let (nrows, ncols) = self.shape_generic();
        let mut res = Matrix::uninit(nrows, ncols);

        for j in 0..ncols.value() {
            for i in 0..nrows.value() {
                // SAFETY: `res` has `self`'s shape, so `(i, j)` is in-bounds for both.
                unsafe {
                    let a = self.data.get_unchecked(i, j).clone();
                    *res.data.get_unchecked_mut(i, j) = MaybeUninit::new(f(i, j, a));
                }
            }
        }

        // SAFETY: the double loop above initialized every entry of `res`.
        unsafe { res.assume_init() }
    }

    /// Returns a matrix containing `f` applied to each pair of corresponding
    /// entries of `self` and `rhs`.
    ///
    /// # Panics
    /// Panics if the two matrices have different runtime shapes.
    #[inline]
    #[must_use]
    pub fn zip_map<T2, N3, S2, F>(&self, rhs: &Matrix<T2, R, C, S2>, mut f: F) -> OMatrix<N3, R, C>
    where
        T: Scalar,
        T2: Scalar,
        N3: Scalar,
        S2: RawStorage<T2, R, C>,
        F: FnMut(T, T2) -> N3,
        DefaultAllocator: Allocator<R, C>,
    {
        let (nrows, ncols) = self.shape_generic();
        let mut res = Matrix::uninit(nrows, ncols);

        assert_eq!(
            (nrows.value(), ncols.value()),
            rhs.shape(),
            "Matrix simultaneous traversal error: dimension mismatch."
        );

        for j in 0..ncols.value() {
            for i in 0..nrows.value() {
                // SAFETY: the assert above guarantees `(i, j)` is in-bounds for all three.
                unsafe {
                    let a = self.data.get_unchecked(i, j).clone();
                    let b = rhs.data.get_unchecked(i, j).clone();
                    *res.data.get_unchecked_mut(i, j) = MaybeUninit::new(f(a, b))
                }
            }
        }

        // SAFETY: the double loop above initialized every entry of `res`.
        unsafe { res.assume_init() }
    }

    /// Returns a matrix containing `f` applied to each triple of corresponding
    /// entries of `self`, `b` and `c`.
    ///
    /// # Panics
    /// Panics if the three matrices have different runtime shapes.
    #[inline]
    #[must_use]
    pub fn zip_zip_map<T2, N3, N4, S2, S3, F>(
        &self,
        b: &Matrix<T2, R, C, S2>,
        c: &Matrix<N3, R, C, S3>,
        mut f: F,
    ) -> OMatrix<N4, R, C>
    where
        T: Scalar,
        T2: Scalar,
        N3: Scalar,
        N4: Scalar,
        S2: RawStorage<T2, R, C>,
        S3: RawStorage<N3, R, C>,
        F: FnMut(T, T2, N3) -> N4,
        DefaultAllocator: Allocator<R, C>,
    {
        let (nrows, ncols) = self.shape_generic();
        let mut res = Matrix::uninit(nrows, ncols);

        assert_eq!(
            (nrows.value(), ncols.value()),
            b.shape(),
            "Matrix simultaneous traversal error: dimension mismatch."
        );
        assert_eq!(
            (nrows.value(), ncols.value()),
            c.shape(),
            "Matrix simultaneous traversal error: dimension mismatch."
        );

        for j in 0..ncols.value() {
            for i in 0..nrows.value() {
                // SAFETY: the asserts above guarantee `(i, j)` is in-bounds everywhere.
                unsafe {
                    let a = self.data.get_unchecked(i, j).clone();
                    let b = b.data.get_unchecked(i, j).clone();
                    let c = c.data.get_unchecked(i, j).clone();
                    *res.data.get_unchecked_mut(i, j) = MaybeUninit::new(f(a, b, c))
                }
            }
        }

        // SAFETY: the double loop above initialized every entry of `res`.
        unsafe { res.assume_init() }
    }

    /// Folds `f` over all the entries of this matrix, in column-major order.
    #[inline]
    #[must_use]
    pub fn fold<Acc>(&self, init: Acc, mut f: impl FnMut(Acc, T) -> Acc) -> Acc
    where
        T: Scalar,
    {
        let (nrows, ncols) = self.shape_generic();

        let mut res = init;

        for j in 0..ncols.value() {
            for i in 0..nrows.value() {
                // SAFETY: `(i, j)` ranges over this matrix's own shape.
                unsafe {
                    let a = self.data.get_unchecked(i, j).clone();
                    res = f(res, a)
                }
            }
        }

        res
    }

    /// Folds `f` over all pairs of corresponding entries of `self` and `rhs`,
    /// in column-major order.
    ///
    /// # Panics
    /// Panics if the two matrices have different runtime shapes.
    #[inline]
    #[must_use]
    pub fn zip_fold<T2, R2, C2, S2, Acc>(
        &self,
        rhs: &Matrix<T2, R2, C2, S2>,
        init: Acc,
        mut f: impl FnMut(Acc, T, T2) -> Acc,
    ) -> Acc
    where
        T: Scalar,
        T2: Scalar,
        R2: Dim,
        C2: Dim,
        S2: RawStorage<T2, R2, C2>,
        ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
    {
        let (nrows, ncols) = self.shape_generic();

        let mut res = init;

        assert_eq!(
            (nrows.value(), ncols.value()),
            rhs.shape(),
            "Matrix simultaneous traversal error: dimension mismatch."
        );

        for j in 0..ncols.value() {
            for i in 0..nrows.value() {
                // SAFETY: the assert above guarantees `(i, j)` is in-bounds for both.
                unsafe {
                    let a = self.data.get_unchecked(i, j).clone();
                    let b = rhs.data.get_unchecked(i, j).clone();
                    res = f(res, a, b)
                }
            }
        }

        res
    }

    /// Applies `f` to each entry of this matrix, in place.
    #[inline]
    pub fn apply<F: FnMut(&mut T)>(&mut self, mut f: F)
    where
        S: RawStorageMut<T, R, C>,
    {
        let (nrows, ncols) = self.shape();

        for j in 0..ncols {
            for i in 0..nrows {
                // SAFETY: `(i, j)` ranges over this matrix's own shape.
                unsafe {
                    let e = self.data.get_unchecked_mut(i, j);
                    f(e)
                }
            }
        }
    }

    /// Applies `f` to each entry of `self`, in place, also passing the
    /// corresponding entry of `rhs`.
    ///
    /// # Panics
    /// Panics if the two matrices have different runtime shapes.
    #[inline]
    pub fn zip_apply<T2, R2, C2, S2>(
        &mut self,
        rhs: &Matrix<T2, R2, C2, S2>,
        mut f: impl FnMut(&mut T, T2),
    ) where
        S: RawStorageMut<T, R, C>,
        T2: Scalar,
        R2: Dim,
        C2: Dim,
        S2: RawStorage<T2, R2, C2>,
        ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
    {
        let (nrows, ncols) = self.shape();

        assert_eq!(
            (nrows, ncols),
            rhs.shape(),
            "Matrix simultaneous traversal error: dimension mismatch."
        );

        for j in 0..ncols {
            for i in 0..nrows {
                // SAFETY: the assert above guarantees `(i, j)` is in-bounds for both.
                unsafe {
                    let e = self.data.get_unchecked_mut(i, j);
                    let rhs = rhs.get_unchecked((i, j)).clone();
                    f(e, rhs)
                }
            }
        }
    }

    /// Applies `f` to each entry of `self`, in place, also passing the
    /// corresponding entries of `b` and `c`.
    ///
    /// # Panics
    /// Panics if the three matrices have different runtime shapes.
    #[inline]
    pub fn zip_zip_apply<T2, R2, C2, S2, N3, R3, C3, S3>(
        &mut self,
        b: &Matrix<T2, R2, C2, S2>,
        c: &Matrix<N3, R3, C3, S3>,
        mut f: impl FnMut(&mut T, T2, N3),
    ) where
        S: RawStorageMut<T, R, C>,
        T2: Scalar,
        R2: Dim,
        C2: Dim,
        S2: RawStorage<T2, R2, C2>,
        N3: Scalar,
        R3: Dim,
        C3: Dim,
        S3: RawStorage<N3, R3, C3>,
        ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
        // NOTE(review): this second clause repeats the `(R2, C2)` constraint above;
        // presumably it was meant to relate `(R3, C3)` — confirm before changing, since
        // tightening it would reject code that currently compiles. The `(R3, C3)` shape
        // is still checked at runtime by the assert on `c.shape()` below.
        ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
    {
        let (nrows, ncols) = self.shape();

        assert_eq!(
            (nrows, ncols),
            b.shape(),
            "Matrix simultaneous traversal error: dimension mismatch."
        );
        assert_eq!(
            (nrows, ncols),
            c.shape(),
            "Matrix simultaneous traversal error: dimension mismatch."
        );

        for j in 0..ncols {
            for i in 0..nrows {
                // SAFETY: the asserts above guarantee `(i, j)` is in-bounds everywhere.
                unsafe {
                    let e = self.data.get_unchecked_mut(i, j);
                    let b = b.get_unchecked((i, j)).clone();
                    let c = c.get_unchecked((i, j)).clone();
                    f(e, b, c)
                }
            }
        }
    }
}
1077
impl<T, R: Dim, C: Dim, S: RawStorage<T, R, C>> Matrix<T, R, C, S> {
    /// Iterates through this matrix's entries in column-major order.
    #[inline]
    pub fn iter(&self) -> MatrixIter<'_, T, R, C, S> {
        MatrixIter::new(&self.data)
    }

    /// Iterates through this matrix's rows.
    #[inline]
    pub fn row_iter(&self) -> RowIter<'_, T, R, C, S> {
        RowIter::new(self)
    }

    /// Iterates through this matrix's columns.
    #[inline]
    pub fn column_iter(&self) -> ColumnIter<'_, T, R, C, S> {
        ColumnIter::new(self)
    }

    /// Mutably iterates through this matrix's entries in column-major order.
    #[inline]
    pub fn iter_mut(&mut self) -> MatrixIterMut<'_, T, R, C, S>
    where
        S: RawStorageMut<T, R, C>,
    {
        MatrixIterMut::new(&mut self.data)
    }

    /// Mutably iterates through this matrix's rows.
    #[inline]
    pub fn row_iter_mut(&mut self) -> RowIterMut<'_, T, R, C, S>
    where
        S: RawStorageMut<T, R, C>,
    {
        RowIterMut::new(self)
    }

    /// Mutably iterates through this matrix's columns.
    #[inline]
    pub fn column_iter_mut(&mut self) -> ColumnIterMut<'_, T, R, C, S>
    where
        S: RawStorageMut<T, R, C>,
    {
        ColumnIterMut::new(self)
    }
}
1188
impl<T, R: Dim, C: Dim, S: RawStorageMut<T, R, C>> Matrix<T, R, C, S> {
    /// Returns a mutable raw pointer to the start of this matrix's component buffer.
    ///
    /// If the matrix is empty, the pointer may be dangling and must not be dereferenced.
    #[inline]
    pub fn as_mut_ptr(&mut self) -> *mut T {
        self.data.ptr_mut()
    }

    /// Swaps two entries without bound-checking.
    ///
    /// # Safety
    /// Both `(row, col)` pairs must be in-bounds; only debug builds assert this.
    #[inline]
    pub unsafe fn swap_unchecked(&mut self, row_cols1: (usize, usize), row_cols2: (usize, usize)) {
        debug_assert!(row_cols1.0 < self.nrows() && row_cols1.1 < self.ncols());
        debug_assert!(row_cols2.0 < self.nrows() && row_cols2.1 < self.ncols());
        self.data.swap_unchecked(row_cols1, row_cols2)
    }

    /// Swaps two entries.
    ///
    /// # Panics
    /// Panics if either index pair is out of bounds.
    #[inline]
    pub fn swap(&mut self, row_cols1: (usize, usize), row_cols2: (usize, usize)) {
        let (nrows, ncols) = self.shape();
        assert!(
            row_cols1.0 < nrows && row_cols1.1 < ncols,
            "Matrix elements swap index out of bounds."
        );
        assert!(
            row_cols2.0 < nrows && row_cols2.1 < ncols,
            "Matrix elements swap index out of bounds."
        );
        // SAFETY: both index pairs were just bound-checked above.
        unsafe { self.swap_unchecked(row_cols1, row_cols2) }
    }

    /// Fills this matrix with the content of `slice`, interpreted in column-major order.
    ///
    /// # Panics
    /// Panics if `slice`'s length differs from the number of matrix entries.
    #[inline]
    pub fn copy_from_slice(&mut self, slice: &[T])
    where
        T: Scalar,
    {
        let (nrows, ncols) = self.shape();

        assert!(
            nrows * ncols == slice.len(),
            "The slice must contain the same number of elements as the matrix."
        );

        for j in 0..ncols {
            for i in 0..nrows {
                // SAFETY: `i + j * nrows < nrows * ncols == slice.len()`, and
                // `(i, j)` is in-bounds for `self` by construction of the loops.
                unsafe {
                    *self.get_unchecked_mut((i, j)) = slice.get_unchecked(i + j * nrows).clone();
                }
            }
        }
    }

    /// Fills this matrix with the content of `other`.
    ///
    /// # Panics
    /// Panics if the two matrices have different shapes.
    #[inline]
    pub fn copy_from<R2, C2, SB>(&mut self, other: &Matrix<T, R2, C2, SB>)
    where
        T: Scalar,
        R2: Dim,
        C2: Dim,
        SB: RawStorage<T, R2, C2>,
        ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
    {
        assert!(
            self.shape() == other.shape(),
            "Unable to copy from a matrix with a different shape."
        );

        for j in 0..self.ncols() {
            for i in 0..self.nrows() {
                // SAFETY: the assert above guarantees `(i, j)` is in-bounds for both.
                unsafe {
                    *self.get_unchecked_mut((i, j)) = other.get_unchecked((i, j)).clone();
                }
            }
        }
    }

    /// Fills this matrix with the content of the transpose of `other`.
    ///
    /// # Panics
    /// Panics if `other`'s shape is not the transpose of `self`'s shape.
    #[inline]
    pub fn tr_copy_from<R2, C2, SB>(&mut self, other: &Matrix<T, R2, C2, SB>)
    where
        T: Scalar,
        R2: Dim,
        C2: Dim,
        SB: RawStorage<T, R2, C2>,
        ShapeConstraint: DimEq<R, C2> + SameNumberOfColumns<C, R2>,
    {
        let (nrows, ncols) = self.shape();
        assert!(
            (ncols, nrows) == other.shape(),
            "Unable to copy from a matrix with incompatible shape."
        );

        for j in 0..ncols {
            for i in 0..nrows {
                // SAFETY: the assert above guarantees `(j, i)` is in-bounds for
                // `other` whenever `(i, j)` is in-bounds for `self`.
                unsafe {
                    *self.get_unchecked_mut((i, j)) = other.get_unchecked((j, i)).clone();
                }
            }
        }
    }

    /// Consuming version of [`Matrix::apply`]: returns `self` with `f` applied
    /// to each of its entries.
    #[inline]
    pub fn apply_into<F: FnMut(&mut T)>(mut self, f: F) -> Self {
        self.apply(f);
        self
    }
}
1307
impl<T, D: Dim, S: RawStorage<T, D>> Vector<T, D, S> {
    /// Gets a reference to the `i`-th element of this column vector without
    /// bound-checking.
    ///
    /// # Safety
    /// `i` must be less than the vector's length; only debug builds assert this.
    #[inline]
    #[must_use]
    pub unsafe fn vget_unchecked(&self, i: usize) -> &T {
        debug_assert!(i < self.nrows(), "Vector index out of bounds.");
        // Convert the logical index into a linear buffer offset using the row stride.
        let i = i * self.strides().0;
        self.data.get_unchecked_linear(i)
    }
}
1320
impl<T, D: Dim, S: RawStorageMut<T, D>> Vector<T, D, S> {
    /// Gets a mutable reference to the `i`-th element of this column vector
    /// without bound-checking.
    ///
    /// # Safety
    /// `i` must be less than the vector's length; only debug builds assert this.
    #[inline]
    #[must_use]
    pub unsafe fn vget_unchecked_mut(&mut self, i: usize) -> &mut T {
        debug_assert!(i < self.nrows(), "Vector index out of bounds.");
        // Convert the logical index into a linear buffer offset using the row stride.
        let i = i * self.strides().0;
        self.data.get_unchecked_linear_mut(i)
    }
}
1333
impl<T, R: Dim, C: Dim, S: RawStorage<T, R, C> + IsContiguous> Matrix<T, R, C, S> {
    /// Extracts a slice containing this matrix's entries in column-major order.
    #[inline]
    #[must_use]
    pub fn as_slice(&self) -> &[T] {
        // SAFETY: the `IsContiguous` bound guarantees the storage is one
        // contiguous, gap-free buffer.
        unsafe { self.data.as_slice_unchecked() }
    }
}
1343
1344impl<T, R: Dim, C: Dim, S: RawStorage<T, R, C> + IsContiguous> AsRef<[T]> for Matrix<T, R, C, S> {
1345 #[inline]
1347 fn as_ref(&self) -> &[T] {
1348 self.as_slice()
1349 }
1350}
1351
impl<T, R: Dim, C: Dim, S: RawStorageMut<T, R, C> + IsContiguous> Matrix<T, R, C, S> {
    /// Extracts a mutable slice containing this matrix's entries in column-major order.
    #[inline]
    #[must_use]
    pub fn as_mut_slice(&mut self) -> &mut [T] {
        // SAFETY: the `IsContiguous` bound guarantees the storage is one
        // contiguous, gap-free buffer.
        unsafe { self.data.as_mut_slice_unchecked() }
    }
}
1361
1362impl<T: Scalar, D: Dim, S: RawStorageMut<T, D, D>> Matrix<T, D, D, S> {
1363 pub fn transpose_mut(&mut self) {
1365 assert!(
1366 self.is_square(),
1367 "Unable to transpose a non-square matrix in-place."
1368 );
1369
1370 let dim = self.shape().0;
1371
1372 for i in 1..dim {
1373 for j in 0..i {
1374 unsafe { self.swap_unchecked((i, j), (j, i)) }
1375 }
1376 }
1377 }
1378}
1379
impl<T: SimdComplexField, R: Dim, C: Dim, S: RawStorage<T, R, C>> Matrix<T, R, C, S> {
    /// Writes the adjoint (conjugate-transpose) of `self` into `out`, initializing
    /// entries according to `Status` (shared implementation for initialized and
    /// uninitialized outputs).
    #[inline]
    fn adjoint_to_uninit<Status, R2, C2, SB>(
        &self,
        _status: Status,
        out: &mut Matrix<Status::Value, R2, C2, SB>,
    ) where
        Status: InitStatus<T>,
        R2: Dim,
        C2: Dim,
        SB: RawStorageMut<Status::Value, R2, C2>,
        ShapeConstraint: SameNumberOfRows<R, C2> + SameNumberOfColumns<C, R2>,
    {
        let (nrows, ncols) = self.shape();
        assert!(
            (ncols, nrows) == out.shape(),
            "Incompatible shape for transpose-copy."
        );

        for i in 0..nrows {
            for j in 0..ncols {
                // SAFETY: the assert above guarantees `(j, i)` is in-bounds for
                // `out` whenever `(i, j)` is in-bounds for `self`.
                unsafe {
                    Status::init(
                        out.get_unchecked_mut((j, i)),
                        self.get_unchecked((i, j)).clone().simd_conjugate(),
                    );
                }
            }
        }
    }

    /// Computes the adjoint (conjugate-transpose) of `self` and stores it into `out`.
    ///
    /// # Panics
    /// Panics if `out`'s shape is not the transpose of `self`'s shape.
    #[inline]
    pub fn adjoint_to<R2, C2, SB>(&self, out: &mut Matrix<T, R2, C2, SB>)
    where
        R2: Dim,
        C2: Dim,
        SB: RawStorageMut<T, R2, C2>,
        ShapeConstraint: SameNumberOfRows<R, C2> + SameNumberOfColumns<C, R2>,
    {
        self.adjoint_to_uninit(Init, out)
    }

    /// Returns the adjoint (conjugate-transpose) of `self` as a new matrix.
    #[inline]
    #[must_use = "Did you mean to use adjoint_mut()?"]
    pub fn adjoint(&self) -> OMatrix<T, C, R>
    where
        DefaultAllocator: Allocator<C, R>,
    {
        let (nrows, ncols) = self.shape_generic();

        let mut res = Matrix::uninit(ncols, nrows);
        self.adjoint_to_uninit(Uninit, &mut res);

        // SAFETY: `adjoint_to_uninit` wrote every entry of `res`.
        unsafe { res.assume_init() }
    }

    /// Deprecated alias for [`Matrix::adjoint_to`].
    #[deprecated(note = "Renamed `self.adjoint_to(out)`.")]
    #[inline]
    pub fn conjugate_transpose_to<R2, C2, SB>(&self, out: &mut Matrix<T, R2, C2, SB>)
    where
        R2: Dim,
        C2: Dim,
        SB: RawStorageMut<T, R2, C2>,
        ShapeConstraint: SameNumberOfRows<R, C2> + SameNumberOfColumns<C, R2>,
    {
        self.adjoint_to(out)
    }

    /// Deprecated alias for [`Matrix::adjoint`].
    #[deprecated(note = "Renamed `self.adjoint()`.")]
    #[inline]
    pub fn conjugate_transpose(&self) -> OMatrix<T, C, R>
    where
        DefaultAllocator: Allocator<C, R>,
    {
        self.adjoint()
    }

    /// Returns the component-wise conjugate of `self` as a new matrix.
    #[inline]
    #[must_use = "Did you mean to use conjugate_mut()?"]
    pub fn conjugate(&self) -> OMatrix<T, R, C>
    where
        DefaultAllocator: Allocator<R, C>,
    {
        self.map(|e| e.simd_conjugate())
    }

    /// Divides each component of `self` by the real scalar `real`, returning a new matrix.
    #[inline]
    #[must_use = "Did you mean to use unscale_mut()?"]
    pub fn unscale(&self, real: T::SimdRealField) -> OMatrix<T, R, C>
    where
        DefaultAllocator: Allocator<R, C>,
    {
        self.map(|e| e.simd_unscale(real.clone()))
    }

    /// Multiplies each component of `self` by the real scalar `real`, returning a new matrix.
    #[inline]
    #[must_use = "Did you mean to use scale_mut()?"]
    pub fn scale(&self, real: T::SimdRealField) -> OMatrix<T, R, C>
    where
        DefaultAllocator: Allocator<R, C>,
    {
        self.map(|e| e.simd_scale(real.clone()))
    }
}
1495
1496impl<T: SimdComplexField, R: Dim, C: Dim, S: RawStorageMut<T, R, C>> Matrix<T, R, C, S> {
1497 #[inline]
1499 pub fn conjugate_mut(&mut self) {
1500 self.apply(|e| *e = e.clone().simd_conjugate())
1501 }
1502
1503 #[inline]
1505 pub fn unscale_mut(&mut self, real: T::SimdRealField) {
1506 self.apply(|e| *e = e.clone().simd_unscale(real.clone()))
1507 }
1508
1509 #[inline]
1511 pub fn scale_mut(&mut self, real: T::SimdRealField) {
1512 self.apply(|e| *e = e.clone().simd_scale(real.clone()))
1513 }
1514}
1515
impl<T: SimdComplexField, D: Dim, S: RawStorageMut<T, D, D>> Matrix<T, D, D, S> {
    /// Deprecated alias for [`Matrix::adjoint_mut`].
    #[deprecated(note = "Renamed to `self.adjoint_mut()`.")]
    pub fn conjugate_transform_mut(&mut self) {
        self.adjoint_mut()
    }

    /// Replaces this square matrix by its adjoint (conjugate-transpose), in place.
    ///
    /// # Panics
    /// Panics if the matrix is not square.
    pub fn adjoint_mut(&mut self) {
        assert!(
            self.is_square(),
            "Unable to transpose a non-square matrix in-place."
        );

        let dim = self.shape().0;

        for i in 0..dim {
            // Swap-and-conjugate each strictly-lower entry with its mirrored
            // strictly-upper counterpart (inner loop is empty when i == 0).
            for j in 0..i {
                unsafe {
                    // SAFETY: j < i < dim, so both index pairs are in-bounds.
                    let ref_ij = self.get_unchecked((i, j)).clone();
                    let ref_ji = self.get_unchecked((j, i)).clone();
                    let conj_ij = ref_ij.simd_conjugate();
                    let conj_ji = ref_ji.simd_conjugate();
                    *self.get_unchecked_mut((i, j)) = conj_ji;
                    *self.get_unchecked_mut((j, i)) = conj_ij;
                }
            }

            {
                // Diagonal entries are their own mirror: conjugate in place.
                // SAFETY: i < dim.
                let diag = unsafe { self.get_unchecked_mut((i, i)) };
                *diag = diag.clone().simd_conjugate();
            }
        }
    }
}
1551
impl<T: Scalar, D: Dim, S: RawStorage<T, D, D>> SquareMatrix<T, D, S> {
    /// The diagonal of this matrix, as a newly allocated column vector.
    ///
    /// # Panics
    /// Panics if the matrix is not square.
    #[inline]
    #[must_use]
    pub fn diagonal(&self) -> OVector<T, D>
    where
        DefaultAllocator: Allocator<D>,
    {
        self.map_diagonal(|e| e)
    }

    /// Applies `f` to each diagonal entry and collects the results into a
    /// newly allocated column vector.
    ///
    /// # Panics
    /// Panics if the matrix is not square.
    #[must_use]
    pub fn map_diagonal<T2: Scalar>(&self, mut f: impl FnMut(T) -> T2) -> OVector<T2, D>
    where
        DefaultAllocator: Allocator<D>,
    {
        assert!(
            self.is_square(),
            "Unable to get the diagonal of a non-square matrix."
        );

        let dim = self.shape_generic().0;
        let mut res = Matrix::uninit(dim, Const::<1>);

        for i in 0..dim.value() {
            // SAFETY: i < dim, so (i, i) is in-bounds for `self` and i for `res`.
            unsafe {
                *res.vget_unchecked_mut(i) =
                    MaybeUninit::new(f(self.get_unchecked((i, i)).clone()));
            }
        }

        // SAFETY: the loop above initialized every entry of `res`.
        unsafe { res.assume_init() }
    }

    /// The trace of this matrix, i.e., the sum of its diagonal entries.
    ///
    /// # Panics
    /// Panics if the matrix is not square.
    #[inline]
    #[must_use]
    pub fn trace(&self) -> T
    where
        T: Scalar + Zero + ClosedAddAssign,
    {
        assert!(
            self.is_square(),
            "Cannot compute the trace of non-square matrix."
        );

        let dim = self.shape_generic().0;
        let mut res = T::zero();

        for i in 0..dim.value() {
            // SAFETY: i < dim, so (i, i) is in-bounds.
            res += unsafe { self.get_unchecked((i, i)).clone() };
        }

        res
    }
}
1614
1615impl<T: SimdComplexField, D: Dim, S: Storage<T, D, D>> SquareMatrix<T, D, S> {
1616 #[inline]
1618 #[must_use]
1619 pub fn symmetric_part(&self) -> OMatrix<T, D, D>
1620 where
1621 DefaultAllocator: Allocator<D, D>,
1622 {
1623 assert!(
1624 self.is_square(),
1625 "Cannot compute the symmetric part of a non-square matrix."
1626 );
1627 let mut tr = self.transpose();
1628 tr += self;
1629 tr *= crate::convert::<_, T>(0.5);
1630 tr
1631 }
1632
1633 #[inline]
1635 #[must_use]
1636 pub fn hermitian_part(&self) -> OMatrix<T, D, D>
1637 where
1638 DefaultAllocator: Allocator<D, D>,
1639 {
1640 assert!(
1641 self.is_square(),
1642 "Cannot compute the hermitian part of a non-square matrix."
1643 );
1644
1645 let mut tr = self.adjoint();
1646 tr += self;
1647 tr *= crate::convert::<_, T>(0.5);
1648 tr
1649 }
1650}
1651
impl<T: Scalar + Zero + One, D: DimAdd<U1> + IsNotStaticOne, S: RawStorage<T, D, D>>
    Matrix<T, D, D, S>
{
    /// Embeds this square matrix into one with one extra row and column, by
    /// copying it into the top-left block of an identity matrix.
    ///
    /// # Panics
    /// Panics if the matrix is not square.
    #[inline]
    #[must_use]
    pub fn to_homogeneous(&self) -> OMatrix<T, DimSum<D, U1>, DimSum<D, U1>>
    where
        DefaultAllocator: Allocator<DimSum<D, U1>, DimSum<D, U1>>,
    {
        assert!(
            self.is_square(),
            "Only square matrices can currently be transformed to homogeneous coordinates."
        );
        let dim = DimSum::<D, U1>::from_usize(self.nrows() + 1);
        let mut res = OMatrix::identity_generic(dim, dim);
        res.generic_view_mut::<D, D>((0, 0), self.shape_generic())
            .copy_from(self);
        res
    }
}
1674
impl<T: Scalar + Zero, D: DimAdd<U1>, S: RawStorage<T, D>> Vector<T, D, S> {
    /// Appends a `0` to this vector, yielding its homogeneous form.
    #[inline]
    #[must_use]
    pub fn to_homogeneous(&self) -> OVector<T, DimSum<D, U1>>
    where
        DefaultAllocator: Allocator<DimSum<D, U1>>,
    {
        self.push(T::zero())
    }

    /// Inverse of [`Self::to_homogeneous`]: drops the last coordinate of `v`,
    /// returning `None` if that coordinate is nonzero.
    #[inline]
    pub fn from_homogeneous<SB>(v: Vector<T, DimSum<D, U1>, SB>) -> Option<OVector<T, D>>
    where
        SB: RawStorage<T, DimSum<D, U1>>,
        DefaultAllocator: Allocator<D>,
    {
        if v[v.len() - 1].is_zero() {
            // The last coordinate is zero: return the owned head of `v`.
            let nrows = D::from_usize(v.len() - 1);
            Some(v.generic_view((0, 0), (nrows, Const::<1>)).into_owned())
        } else {
            None
        }
    }
}
1703
1704impl<T: Scalar, D: DimAdd<U1>, S: RawStorage<T, D>> Vector<T, D, S> {
1705 #[inline]
1707 #[must_use]
1708 pub fn push(&self, element: T) -> OVector<T, DimSum<D, U1>>
1709 where
1710 DefaultAllocator: Allocator<DimSum<D, U1>>,
1711 {
1712 let len = self.len();
1713 let hnrows = DimSum::<D, U1>::from_usize(len + 1);
1714 let mut res = Matrix::uninit(hnrows, Const::<1>);
1715 res.generic_view_mut((0, 0), self.shape_generic())
1718 .zip_apply(self, |out, e| *out = MaybeUninit::new(e));
1719 res[(len, 0)] = MaybeUninit::new(element);
1720
1721 unsafe { res.assume_init() }
1723 }
1724}
1725
1726impl<T, R: Dim, C: Dim, S> AbsDiffEq for Matrix<T, R, C, S>
1727where
1728 T: Scalar + AbsDiffEq,
1729 S: RawStorage<T, R, C>,
1730 T::Epsilon: Clone,
1731{
1732 type Epsilon = T::Epsilon;
1733
1734 #[inline]
1735 fn default_epsilon() -> Self::Epsilon {
1736 T::default_epsilon()
1737 }
1738
1739 #[inline]
1740 fn abs_diff_eq(&self, other: &Self, epsilon: Self::Epsilon) -> bool {
1741 self.iter()
1742 .zip(other.iter())
1743 .all(|(a, b)| a.abs_diff_eq(b, epsilon.clone()))
1744 }
1745}
1746
impl<T, R: Dim, C: Dim, S> RelativeEq for Matrix<T, R, C, S>
where
    T: Scalar + RelativeEq,
    S: Storage<T, R, C>,
    T::Epsilon: Clone,
{
    #[inline]
    fn default_max_relative() -> Self::Epsilon {
        T::default_max_relative()
    }

    #[inline]
    fn relative_eq(
        &self,
        other: &Self,
        epsilon: Self::Epsilon,
        max_relative: Self::Epsilon,
    ) -> bool {
        // NOTE: this is not infinite recursion. Rust method resolution prefers
        // an *inherent* method over a trait method of the same name, so this
        // call delegates to the inherent `Matrix::relative_eq` (presumably
        // defined elsewhere in this file — do not "simplify" this call site).
        self.relative_eq(other, epsilon, max_relative)
    }
}
1768
1769impl<T, R: Dim, C: Dim, S> UlpsEq for Matrix<T, R, C, S>
1770where
1771 T: Scalar + UlpsEq,
1772 S: RawStorage<T, R, C>,
1773 T::Epsilon: Clone,
1774{
1775 #[inline]
1776 fn default_max_ulps() -> u32 {
1777 T::default_max_ulps()
1778 }
1779
1780 #[inline]
1781 fn ulps_eq(&self, other: &Self, epsilon: Self::Epsilon, max_ulps: u32) -> bool {
1782 assert!(self.shape() == other.shape());
1783 self.iter()
1784 .zip(other.iter())
1785 .all(|(a, b)| a.ulps_eq(b, epsilon.clone(), max_ulps))
1786 }
1787}
1788
impl<T, R: Dim, C: Dim, S> PartialOrd for Matrix<T, R, C, S>
where
    T: Scalar + PartialOrd,
    S: RawStorage<T, R, C>,
{
    /// Elementwise partial order: `Some(Less)`/`Some(Greater)` only when every
    /// component pair agrees with (or equals under) that ordering; mixed
    /// directions, incomparable components, or mismatched shapes yield `None`.
    #[inline]
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        // Differently-shaped matrices are incomparable.
        if self.shape() != other.shape() {
            return None;
        }

        // Two empty matrices are trivially equal.
        if self.nrows() == 0 || self.ncols() == 0 {
            return Some(Ordering::Equal);
        }

        // Seed the running ordering with the first component pair.
        // SAFETY: the matrix was just checked to be non-empty, so linear
        // index 0 is in bounds for both operands.
        let mut first_ord = unsafe {
            self.data
                .get_unchecked_linear(0)
                .partial_cmp(other.data.get_unchecked_linear(0))
        };

        if let Some(first_ord) = first_ord.as_mut() {
            let mut it = self.iter().zip(other.iter());
            // Skip the first pair: it was already consumed to seed `first_ord`.
            let _ = it.next(); for (left, right) in it {
                if let Some(ord) = left.partial_cmp(right) {
                    match ord {
                        // Equal components never change the running ordering.
                        Ordering::Equal => { }
                        Ordering::Less => {
                            // A component ordered the other way makes the
                            // matrices incomparable.
                            if *first_ord == Ordering::Greater {
                                return None;
                            }
                            *first_ord = ord
                        }
                        Ordering::Greater => {
                            if *first_ord == Ordering::Less {
                                return None;
                            }
                            *first_ord = ord
                        }
                    }
                } else {
                    // Any incomparable component pair (e.g. a NaN) makes the
                    // whole comparison undefined.
                    return None;
                }
            }
        }

        first_ord
    }

    /// `true` iff every component of `self` is strictly less than the matching
    /// component of `right`. Panics on shape mismatch.
    #[inline]
    fn lt(&self, right: &Self) -> bool {
        assert_eq!(
            self.shape(),
            right.shape(),
            "Matrix comparison error: dimensions mismatch."
        );
        self.iter().zip(right.iter()).all(|(a, b)| a.lt(b))
    }

    /// `true` iff every component of `self` is less than or equal to the
    /// matching component of `right`. Panics on shape mismatch.
    #[inline]
    fn le(&self, right: &Self) -> bool {
        assert_eq!(
            self.shape(),
            right.shape(),
            "Matrix comparison error: dimensions mismatch."
        );
        self.iter().zip(right.iter()).all(|(a, b)| a.le(b))
    }

    /// `true` iff every component of `self` is strictly greater than the
    /// matching component of `right`. Panics on shape mismatch.
    #[inline]
    fn gt(&self, right: &Self) -> bool {
        assert_eq!(
            self.shape(),
            right.shape(),
            "Matrix comparison error: dimensions mismatch."
        );
        self.iter().zip(right.iter()).all(|(a, b)| a.gt(b))
    }

    /// `true` iff every component of `self` is greater than or equal to the
    /// matching component of `right`. Panics on shape mismatch.
    #[inline]
    fn ge(&self, right: &Self) -> bool {
        assert_eq!(
            self.shape(),
            right.shape(),
            "Matrix comparison error: dimensions mismatch."
        );
        self.iter().zip(right.iter()).all(|(a, b)| a.ge(b))
    }
}
1880
// Marker impl: matrix equality is a total equivalence relation whenever the
// component type `T` is itself `Eq` (no NaN-like partially-equal values).
impl<T, R: Dim, C: Dim, S> Eq for Matrix<T, R, C, S>
where
    T: Eq,
    S: RawStorage<T, R, C>,
{
}
1887
1888impl<T, R, R2, C, C2, S, S2> PartialEq<Matrix<T, R2, C2, S2>> for Matrix<T, R, C, S>
1889where
1890 T: PartialEq,
1891 C: Dim,
1892 C2: Dim,
1893 R: Dim,
1894 R2: Dim,
1895 S: RawStorage<T, R, C>,
1896 S2: RawStorage<T, R2, C2>,
1897{
1898 #[inline]
1899 fn eq(&self, right: &Matrix<T, R2, C2, S2>) -> bool {
1900 self.shape() == right.shape() && self.iter().zip(right.iter()).all(|(l, r)| l == r)
1901 }
1902}
1903
/// Implements a text-formatting trait for `Matrix` by rendering the entries in
/// an aligned, box-drawn grid. Two format strings are supplied: one used when
/// the formatter carries no precision, and one that forwards `f.precision()`
/// as the second formatting argument.
macro_rules! impl_fmt {
    ($trait: path, $fmt_str_without_precision: expr, $fmt_str_with_precision: expr) => {
        impl<T, R: Dim, C: Dim, S> $trait for Matrix<T, R, C, S>
        where
            T: Scalar + $trait,
            S: RawStorage<T, R, C>,
        {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                // Measures how many characters `val` occupies once formatted,
                // honoring the formatter's precision when one is present.
                #[cfg(feature = "std")]
                fn val_width<T: Scalar + $trait>(val: &T, f: &mut fmt::Formatter<'_>) -> usize {
                    match f.precision() {
                        Some(precision) => format!($fmt_str_with_precision, val, precision)
                            .chars()
                            .count(),
                        None => format!($fmt_str_without_precision, val).chars().count(),
                    }
                }

                // Without `std` there is no `format!` to measure with, so fall
                // back to a fixed column width.
                #[cfg(not(feature = "std"))]
                fn val_width<T: Scalar + $trait>(_: &T, _: &mut fmt::Formatter<'_>) -> usize {
                    4
                }

                let (nrows, ncols) = self.shape();

                // Degenerate (empty) matrices render as a flat bracket pair.
                if nrows == 0 || ncols == 0 {
                    return write!(f, "[ ]");
                }

                // First pass: find the widest rendered entry so columns align.
                let mut max_length = 0;

                for i in 0..nrows {
                    for j in 0..ncols {
                        max_length = crate::max(max_length, val_width(&self[(i, j)], f));
                    }
                }

                let max_length_with_space = max_length + 1;

                writeln!(f)?;
                writeln!(
                    f,
                    " ┌ {:>width$} ┐",
                    "",
                    width = max_length_with_space * ncols - 1
                )?;

                // Second pass: emit each row, right-padding every entry to the
                // common column width before formatting it.
                for i in 0..nrows {
                    write!(f, " │")?;
                    for j in 0..ncols {
                        let number_length = val_width(&self[(i, j)], f) + 1;
                        let pad = max_length_with_space - number_length;
                        write!(f, " {:>thepad$}", "", thepad = pad)?;
                        match f.precision() {
                            Some(precision) => {
                                write!(f, $fmt_str_with_precision, (*self)[(i, j)], precision)?
                            }
                            None => write!(f, $fmt_str_without_precision, (*self)[(i, j)])?,
                        }
                    }
                    writeln!(f, " │")?;
                }

                writeln!(
                    f,
                    " └ {:>width$} ┘",
                    "",
                    width = max_length_with_space * ncols - 1
                )?;
                writeln!(f)
            }
        }
    };
}
impl_fmt!(fmt::Display, "{}", "{:.1$}");
impl_fmt!(fmt::LowerExp, "{:e}", "{:.1$e}");
impl_fmt!(fmt::UpperExp, "{:E}", "{:.1$E}");
// NOTE(review): the Octal/LowerHex/UpperHex strings below use `{:1$…}` (no
// `.`), so the "precision" argument is applied as a minimum *width*, unlike
// the `{:.1$…}` precision forms above — confirm whether this asymmetry is
// intentional before changing it (callers may rely on the width behavior).
impl_fmt!(fmt::Octal, "{:o}", "{:1$o}");
impl_fmt!(fmt::LowerHex, "{:x}", "{:1$x}");
impl_fmt!(fmt::UpperHex, "{:X}", "{:1$X}");
impl_fmt!(fmt::Binary, "{:b}", "{:.1$b}");
impl_fmt!(fmt::Pointer, "{:p}", "{:.1$p}");
1986
#[cfg(test)]
mod tests {
    // An empty matrix must print as the flat `[ ]` form rather than an
    // empty box-drawn frame.
    #[test]
    fn empty_display() {
        let vec: Vec<f64> = Vec::new();
        let dvector = crate::DVector::from_vec(vec);
        assert_eq!(format!("{}", dvector), "[ ]")
    }

    // `LowerExp` formatting must right-align entries inside the frame.
    // The expected raw string is whitespace-sensitive: it encodes the exact
    // column padding, so do not re-indent it.
    #[test]
    fn lower_exp() {
        let test = crate::Matrix2::new(1e6, 2e5, 2e-5, 1.);
        assert_eq!(
            format!("{:e}", test),
            r"
 ┌           ┐
 │  1e6  2e5 │
 │ 2e-5  1e0 │
 └           ┘

"
        )
    }
}
2011
impl<
        T: Scalar + ClosedAddAssign + ClosedSubAssign + ClosedMulAssign,
        R: Dim,
        C: Dim,
        S: RawStorage<T, R, C>,
    > Matrix<T, R, C, S>
{
    /// The perpendicular product (a.k.a. 2D cross product) between two 2D
    /// column vectors: `self.x * b.y - self.y * b.x`.
    ///
    /// # Panics
    /// Panics if `self` and `b` do not both have shape `(2, 1)`.
    #[inline]
    #[must_use]
    pub fn perp<R2, C2, SB>(&self, b: &Matrix<T, R2, C2, SB>) -> T
    where
        R2: Dim,
        C2: Dim,
        SB: RawStorage<T, R2, C2>,
        ShapeConstraint: SameNumberOfRows<R, U2>
            + SameNumberOfColumns<C, U1>
            + SameNumberOfRows<R2, U2>
            + SameNumberOfColumns<C2, U1>,
    {
        let shape = self.shape();
        assert_eq!(
            shape,
            b.shape(),
            "2D vector perpendicular product dimension mismatch."
        );
        assert_eq!(
            shape,
            (2, 1),
            "2D perpendicular product requires (2, 1) vectors {:?}",
            shape
        );

        // SAFETY: the asserts above guarantee both operands have shape (2, 1),
        // so the indices (0, 0) and (1, 0) are in bounds.
        let ax = unsafe { self.get_unchecked((0, 0)).clone() };
        let ay = unsafe { self.get_unchecked((1, 0)).clone() };
        let bx = unsafe { b.get_unchecked((0, 0)).clone() };
        let by = unsafe { b.get_unchecked((1, 0)).clone() };

        ax * by - ay * bx
    }

    /// The 3D cross product between two vectors.
    ///
    /// Both operands must have the same shape — either `(3, 1)` column vectors
    /// or `(1, 3)` row vectors — and the result has that same shape.
    ///
    /// # Panics
    /// Panics if the shapes differ or are not 3-dimensional.
    #[inline]
    #[must_use]
    pub fn cross<R2, C2, SB>(&self, b: &Matrix<T, R2, C2, SB>) -> MatrixCross<T, R, C, R2, C2>
    where
        R2: Dim,
        C2: Dim,
        SB: RawStorage<T, R2, C2>,
        DefaultAllocator: SameShapeAllocator<R, C, R2, C2>,
        ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
    {
        let shape = self.shape();
        assert_eq!(shape, b.shape(), "Vector cross product dimension mismatch.");
        assert!(
            shape == (3, 1) || shape == (1, 3),
            "Vector cross product dimension mismatch: must be (3, 1) or (1, 3) but found {:?}.",
            shape
        );

        // Column-vector case: both operands are (3, 1).
        if shape.0 == 3 {
            // SAFETY: shape is (3, 1), so all indices below are in bounds and
            // every entry of `res` is written before `assume_init`.
            unsafe {
                let mut res = Matrix::uninit(Dim::from_usize(3), Dim::from_usize(1));

                let ax = self.get_unchecked((0, 0));
                let ay = self.get_unchecked((1, 0));
                let az = self.get_unchecked((2, 0));

                let bx = b.get_unchecked((0, 0));
                let by = b.get_unchecked((1, 0));
                let bz = b.get_unchecked((2, 0));

                *res.get_unchecked_mut((0, 0)) =
                    MaybeUninit::new(ay.clone() * bz.clone() - az.clone() * by.clone());
                *res.get_unchecked_mut((1, 0)) =
                    MaybeUninit::new(az.clone() * bx.clone() - ax.clone() * bz.clone());
                *res.get_unchecked_mut((2, 0)) =
                    MaybeUninit::new(ax.clone() * by.clone() - ay.clone() * bx.clone());

                res.assume_init()
            }
        } else {
            // Row-vector case: both operands are (1, 3).
            // SAFETY: shape is (1, 3), so all indices below are in bounds and
            // every entry of `res` is written before `assume_init`.
            unsafe {
                let mut res = Matrix::uninit(Dim::from_usize(1), Dim::from_usize(3));

                let ax = self.get_unchecked((0, 0));
                let ay = self.get_unchecked((0, 1));
                let az = self.get_unchecked((0, 2));

                let bx = b.get_unchecked((0, 0));
                let by = b.get_unchecked((0, 1));
                let bz = b.get_unchecked((0, 2));

                *res.get_unchecked_mut((0, 0)) =
                    MaybeUninit::new(ay.clone() * bz.clone() - az.clone() * by.clone());
                *res.get_unchecked_mut((0, 1)) =
                    MaybeUninit::new(az.clone() * bx.clone() - ax.clone() * bz.clone());
                *res.get_unchecked_mut((0, 2)) =
                    MaybeUninit::new(ax.clone() * by.clone() - ay.clone() * bx.clone());

                res.assume_init()
            }
        }
    }
}
2125
2126impl<T: Scalar + Field, S: RawStorage<T, U3>> Vector<T, U3, S> {
2127 #[inline]
2129 #[must_use]
2130 pub fn cross_matrix(&self) -> OMatrix<T, U3, U3> {
2131 OMatrix::<T, U3, U3>::new(
2132 T::zero(),
2133 -self[2].clone(),
2134 self[1].clone(),
2135 self[2].clone(),
2136 T::zero(),
2137 -self[0].clone(),
2138 -self[1].clone(),
2139 self[0].clone(),
2140 T::zero(),
2141 )
2142 }
2143}
2144
2145impl<T: SimdComplexField, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
2146 #[inline]
2148 #[must_use]
2149 pub fn angle<R2: Dim, C2: Dim, SB>(&self, other: &Matrix<T, R2, C2, SB>) -> T::SimdRealField
2150 where
2151 SB: Storage<T, R2, C2>,
2152 ShapeConstraint: DimEq<R, R2> + DimEq<C, C2>,
2153 {
2154 let prod = self.dotc(other);
2155 let n1 = self.norm();
2156 let n2 = other.norm();
2157
2158 if n1.is_zero() || n2.is_zero() {
2159 T::SimdRealField::zero()
2160 } else {
2161 let cang = prod.simd_real() / (n1 * n2);
2162 cang.simd_clamp(-T::SimdRealField::one(), T::SimdRealField::one())
2163 .simd_acos()
2164 }
2165 }
2166}
2167
2168impl<T, R: Dim, C: Dim, S> AbsDiffEq for Unit<Matrix<T, R, C, S>>
2169where
2170 T: Scalar + AbsDiffEq,
2171 S: RawStorage<T, R, C>,
2172 T::Epsilon: Clone,
2173{
2174 type Epsilon = T::Epsilon;
2175
2176 #[inline]
2177 fn default_epsilon() -> Self::Epsilon {
2178 T::default_epsilon()
2179 }
2180
2181 #[inline]
2182 fn abs_diff_eq(&self, other: &Self, epsilon: Self::Epsilon) -> bool {
2183 self.as_ref().abs_diff_eq(other.as_ref(), epsilon)
2184 }
2185}
2186
2187impl<T, R: Dim, C: Dim, S> RelativeEq for Unit<Matrix<T, R, C, S>>
2188where
2189 T: Scalar + RelativeEq,
2190 S: Storage<T, R, C>,
2191 T::Epsilon: Clone,
2192{
2193 #[inline]
2194 fn default_max_relative() -> Self::Epsilon {
2195 T::default_max_relative()
2196 }
2197
2198 #[inline]
2199 fn relative_eq(
2200 &self,
2201 other: &Self,
2202 epsilon: Self::Epsilon,
2203 max_relative: Self::Epsilon,
2204 ) -> bool {
2205 self.as_ref()
2206 .relative_eq(other.as_ref(), epsilon, max_relative)
2207 }
2208}
2209
2210impl<T, R: Dim, C: Dim, S> UlpsEq for Unit<Matrix<T, R, C, S>>
2211where
2212 T: Scalar + UlpsEq,
2213 S: RawStorage<T, R, C>,
2214 T::Epsilon: Clone,
2215{
2216 #[inline]
2217 fn default_max_ulps() -> u32 {
2218 T::default_max_ulps()
2219 }
2220
2221 #[inline]
2222 fn ulps_eq(&self, other: &Self, epsilon: Self::Epsilon, max_ulps: u32) -> bool {
2223 self.as_ref().ulps_eq(other.as_ref(), epsilon, max_ulps)
2224 }
2225}
2226
2227impl<T, R, C, S> Hash for Matrix<T, R, C, S>
2228where
2229 T: Scalar + Hash,
2230 R: Dim,
2231 C: Dim,
2232 S: RawStorage<T, R, C>,
2233{
2234 fn hash<H: Hasher>(&self, state: &mut H) {
2235 let (nrows, ncols) = self.shape();
2236 (nrows, ncols).hash(state);
2237
2238 for j in 0..ncols {
2239 for i in 0..nrows {
2240 unsafe {
2241 self.get_unchecked((i, j)).hash(state);
2242 }
2243 }
2244 }
2245 }
2246}
2247
impl<T, D, S> Unit<Vector<T, D, S>>
where
    T: Scalar,
    D: Dim,
    S: RawStorage<T, D, U1>,
{
    /// Casts the components of this unit vector to another scalar type `T2`,
    /// rewrapping the converted vector in `Unit` without re-normalizing.
    // NOTE(review): the `T: Scalar` bound in the `where` clause below is
    // redundant with the bound on this `impl` block; harmless, but removable.
    pub fn cast<T2: Scalar>(self) -> Unit<OVector<T2, D>>
    where
        T: Scalar,
        OVector<T2, D>: SupersetOf<Vector<T, D, S>>,
        DefaultAllocator: Allocator<D, U1>,
    {
        Unit::new_unchecked(crate::convert_ref(self.as_ref()))
    }
}
2272
impl<T, S> Matrix<T, U1, U1, S>
where
    S: RawStorage<T, U1, U1>,
{
    /// Returns a reference to the single element of this 1x1 matrix.
    pub fn as_scalar(&self) -> &T {
        &self[(0, 0)]
    }
    /// Returns a mutable reference to the single element of this 1x1 matrix.
    /// Requires mutable storage (`RawStorageMut`).
    pub fn as_scalar_mut(&mut self) -> &mut T
    where
        S: RawStorageMut<T, U1>,
    {
        &mut self[(0, 0)]
    }
    /// Returns a clone of the single element of this 1x1 matrix.
    pub fn to_scalar(&self) -> T
    where
        T: Clone,
    {
        self.as_scalar().clone()
    }
}
2346
impl<T> super::alias::Matrix1<T> {
    /// Consumes this 1x1 matrix and returns its single element by value.
    pub fn into_scalar(self) -> T {
        // Destructure the backing 1x1 array storage to move the element out
        // without cloning.
        let [[scalar]] = self.data.0;
        scalar
    }
}