// nalgebra/linalg/cholesky.rs
#[cfg(feature = "serde-serialize-no-std")]
use serde::{Deserialize, Serialize};
use num::{One, Zero};
use simba::scalar::ComplexField;
use simba::simd::SimdComplexField;
use crate::allocator::Allocator;
use crate::base::{Const, DefaultAllocator, Matrix, OMatrix, Vector};
use crate::constraint::{SameNumberOfRows, ShapeConstraint};
use crate::dimension::{Dim, DimAdd, DimDiff, DimSub, DimSum, U1};
use crate::storage::{Storage, StorageMut};
/// The Cholesky decomposition of a symmetric positive-definite matrix.
#[cfg_attr(feature = "serde-serialize-no-std", derive(Serialize, Deserialize))]
#[cfg_attr(
feature = "serde-serialize-no-std",
serde(bound(serialize = "DefaultAllocator: Allocator<D>,
OMatrix<T, D, D>: Serialize"))
)]
#[cfg_attr(
feature = "serde-serialize-no-std",
serde(bound(deserialize = "DefaultAllocator: Allocator<D>,
OMatrix<T, D, D>: Deserialize<'de>"))
)]
#[derive(Clone, Debug)]
pub struct Cholesky<T: SimdComplexField, D: Dim>
where
DefaultAllocator: Allocator<D, D>,
{
chol: OMatrix<T, D, D>,
}
impl<T: SimdComplexField, D: Dim> Copy for Cholesky<T, D>
where
DefaultAllocator: Allocator<D, D>,
OMatrix<T, D, D>: Copy,
{
}
impl<T: SimdComplexField, D: Dim> Cholesky<T, D>
where
DefaultAllocator: Allocator<D, D>,
{
/// Computes the Cholesky decomposition of `matrix` without checking that the matrix is positive-definite.
///
/// If the input matrix is not positive-definite, the decomposition may contain garbage values (`Inf`, `NaN`, etc.).
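///
/// # Example
///
/// An illustrative sketch; the `Matrix2` values below are assumed for demonstration:
///
/// ```
/// use nalgebra::{Cholesky, Matrix2};
///
/// // A symmetric positive-definite matrix; no check is performed here.
/// let m = Matrix2::new(4.0, 2.0,
///                      2.0, 3.0);
/// let chol = Cholesky::new_unchecked(m);
/// let l = chol.l();
/// // The factor satisfies L * Lᴴ = M (up to rounding).
/// assert!((l * l.adjoint() - m).norm() < 1.0e-12);
/// ```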
pub fn new_unchecked(mut matrix: OMatrix<T, D, D>) -> Self {
assert!(matrix.is_square(), "The input matrix must be square.");
let n = matrix.nrows();
for j in 0..n {
for k in 0..j {
let factor = unsafe { -matrix.get_unchecked((j, k)).clone() };
let (mut col_j, col_k) = matrix.columns_range_pair_mut(j, k);
let mut col_j = col_j.rows_range_mut(j..);
let col_k = col_k.rows_range(j..);
col_j.axpy(factor.simd_conjugate(), &col_k, T::one());
}
let diag = unsafe { matrix.get_unchecked((j, j)).clone() };
let denom = diag.simd_sqrt();
unsafe {
*matrix.get_unchecked_mut((j, j)) = denom.clone();
}
let mut col = matrix.view_range_mut(j + 1.., j);
col /= denom;
}
Cholesky { chol: matrix }
}
/// Uses the given matrix as-is, without any checks or modifications, as the
/// Cholesky decomposition.
///
/// It is up to the user to ensure all invariants hold.
pub fn pack_dirty(matrix: OMatrix<T, D, D>) -> Self {
Cholesky { chol: matrix }
}
/// Retrieves the lower-triangular factor of the Cholesky decomposition with its strictly
/// upper-triangular part filled with zeros.
pub fn unpack(mut self) -> OMatrix<T, D, D> {
self.chol.fill_upper_triangle(T::zero(), 1);
self.chol
}
/// Retrieves the lower-triangular factor of the Cholesky decomposition, without zeroing-out
/// its strict upper-triangular part.
///
/// The values of the strict upper-triangular part are garbage and should be ignored by further
/// computations.
pub fn unpack_dirty(self) -> OMatrix<T, D, D> {
self.chol
}
/// Retrieves the lower-triangular factor of the Cholesky decomposition with its strictly
/// upper-triangular part filled with zeros.
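///
/// # Example
///
/// A small sketch (values assumed) showing that the returned factor is zeroed above the diagonal:
///
/// ```
/// use nalgebra::{Cholesky, Matrix2};
///
/// let m = Matrix2::new(4.0, 2.0,
///                      2.0, 3.0);
/// let l = Cholesky::new(m).unwrap().l();
/// assert_eq!(l[(0, 1)], 0.0);
/// assert!((l * l.adjoint() - m).norm() < 1.0e-12);
/// ```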
#[must_use]
pub fn l(&self) -> OMatrix<T, D, D> {
self.chol.lower_triangle()
}
/// Retrieves the lower-triangular factor of the Cholesky decomposition, without zeroing-out
/// its strict upper-triangular part.
///
/// This is an allocation-less version of `self.l()`. The values of the strict upper-triangular
/// part are garbage and should be ignored by further computations.
#[must_use]
pub fn l_dirty(&self) -> &OMatrix<T, D, D> {
&self.chol
}
/// Solves the system `self * x = b` where `self` is the decomposed matrix and `x` the unknown.
///
/// The result is stored in `b`.
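///
/// # Example
///
/// An in-place sketch (values assumed):
///
/// ```
/// use nalgebra::{Cholesky, Matrix2, Vector2};
///
/// let m = Matrix2::new(4.0, 2.0,
///                      2.0, 3.0);
/// let b = Vector2::new(6.0, 5.0);
/// let mut x = b;
/// Cholesky::new(m).unwrap().solve_mut(&mut x);
/// // `x` now holds the solution of m * x = b.
/// assert!((m * x - b).norm() < 1.0e-12);
/// ```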
pub fn solve_mut<R2: Dim, C2: Dim, S2>(&self, b: &mut Matrix<T, R2, C2, S2>)
where
S2: StorageMut<T, R2, C2>,
ShapeConstraint: SameNumberOfRows<R2, D>,
{
self.chol.solve_lower_triangular_unchecked_mut(b);
self.chol.ad_solve_lower_triangular_unchecked_mut(b);
}
/// Returns the solution of the system `self * x = b` where `self` is the decomposed matrix and
/// `x` the unknown.
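///
/// # Example
///
/// A minimal sketch (values assumed):
///
/// ```
/// use nalgebra::{Cholesky, Matrix2, Vector2};
///
/// let m = Matrix2::new(4.0, 2.0,
///                      2.0, 3.0);
/// let b = Vector2::new(6.0, 5.0);
/// let x = Cholesky::new(m).unwrap().solve(&b);
/// assert!((m * x - b).norm() < 1.0e-12);
/// ```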
#[must_use = "Did you mean to use solve_mut()?"]
pub fn solve<R2: Dim, C2: Dim, S2>(&self, b: &Matrix<T, R2, C2, S2>) -> OMatrix<T, R2, C2>
where
S2: Storage<T, R2, C2>,
DefaultAllocator: Allocator<R2, C2>,
ShapeConstraint: SameNumberOfRows<R2, D>,
{
let mut res = b.clone_owned();
self.solve_mut(&mut res);
res
}
/// Computes the inverse of the decomposed matrix.
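///
/// # Example
///
/// An illustrative sketch (values assumed):
///
/// ```
/// use nalgebra::{Cholesky, Matrix2};
///
/// let m = Matrix2::new(4.0, 2.0,
///                      2.0, 3.0);
/// let inv = Cholesky::new(m).unwrap().inverse();
/// assert!((m * inv - Matrix2::identity()).norm() < 1.0e-12);
/// ```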
#[must_use]
pub fn inverse(&self) -> OMatrix<T, D, D> {
let shape = self.chol.shape_generic();
let mut res = OMatrix::identity_generic(shape.0, shape.1);
self.solve_mut(&mut res);
res
}
/// Computes the determinant of the decomposed matrix.
#[must_use]
pub fn determinant(&self) -> T::SimdRealField {
let dim = self.chol.nrows();
let mut prod_diag = T::one();
for i in 0..dim {
prod_diag *= unsafe { self.chol.get_unchecked((i, i)).clone() };
}
prod_diag.simd_modulus_squared()
}
/// Computes the natural logarithm of the determinant of the decomposed matrix.
///
/// This method is more robust than `.determinant()` to very small or very
/// large determinants since it returns the natural logarithm of the
/// determinant rather than the determinant itself.
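///
/// # Example
///
/// A sketch comparing `determinant` and `ln_determinant` (values assumed):
///
/// ```
/// use nalgebra::{Cholesky, Matrix2};
///
/// let m = Matrix2::new(4.0, 2.0,
///                      2.0, 3.0);
/// let chol = Cholesky::new(m).unwrap();
/// // det(m) = 4 * 3 - 2 * 2 = 8.
/// assert!((chol.determinant() - 8.0).abs() < 1.0e-12);
/// assert!((chol.ln_determinant() - 8.0_f64.ln()).abs() < 1.0e-12);
/// ```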
#[must_use]
pub fn ln_determinant(&self) -> T::SimdRealField {
let dim = self.chol.nrows();
let mut sum_diag = T::SimdRealField::zero();
for i in 0..dim {
sum_diag += unsafe {
self.chol
.get_unchecked((i, i))
.clone()
.simd_modulus_squared()
.simd_ln()
};
}
sum_diag
}
}
impl<T: ComplexField, D: Dim> Cholesky<T, D>
where
DefaultAllocator: Allocator<D, D>,
{
/// Attempts to compute the Cholesky decomposition of `matrix`.
///
/// Returns `None` if the input matrix is not positive-definite. The input matrix is assumed
/// to be symmetric and only the lower-triangular part is read.
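///
/// # Example
///
/// A minimal sketch of the success and failure cases (values assumed):
///
/// ```
/// use nalgebra::{Cholesky, Matrix2};
///
/// let spd = Matrix2::new(4.0, 2.0,
///                        2.0, 3.0);
/// assert!(Cholesky::new(spd).is_some());
///
/// // This matrix has a negative eigenvalue, so the decomposition fails.
/// let not_spd = Matrix2::new(1.0, 2.0,
///                            2.0, 1.0);
/// assert!(Cholesky::new(not_spd).is_none());
/// ```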
pub fn new(matrix: OMatrix<T, D, D>) -> Option<Self> {
Self::new_internal(matrix, None)
}
/// Attempts to approximate the Cholesky decomposition of `matrix` by
/// replacing non-positive values on the diagonal during the decomposition
/// with the given `substitute`.
///
/// [`try_sqrt`](ComplexField::try_sqrt) will be applied to the `substitute`
/// when it has to be used.
///
/// If your input matrix results only in positive values on the diagonal
/// during the decomposition, `substitute` is unused and the result is just
/// the same as if you used [`new`](Cholesky::new).
///
/// This method makes it possible to compensate for matrices with very small or even
/// negative values due to numerical errors, but it necessarily results in only
/// an approximation: it is basically a hack. If you don't specifically need
/// Cholesky, it may be better to consider alternatives like the
/// [`LU`](crate::linalg::LU) decomposition/factorization.
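///
/// # Example
///
/// A sketch of the substitution behaviour (values assumed):
///
/// ```
/// use nalgebra::{Cholesky, Matrix2};
///
/// // This matrix is only positive *semi*-definite: the decomposition
/// // hits a zero pivot and `new` fails.
/// let psd = Matrix2::new(1.0, 1.0,
///                        1.0, 1.0);
/// assert!(Cholesky::new(psd).is_none());
///
/// // Substituting a small positive value for the offending pivot
/// // yields an approximate decomposition instead.
/// assert!(Cholesky::new_with_substitute(psd, 1.0e-6).is_some());
/// ```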
pub fn new_with_substitute(matrix: OMatrix<T, D, D>, substitute: T) -> Option<Self> {
Self::new_internal(matrix, Some(substitute))
}
/// Common implementation for `new` and `new_with_substitute`.
fn new_internal(mut matrix: OMatrix<T, D, D>, substitute: Option<T>) -> Option<Self> {
assert!(matrix.is_square(), "The input matrix must be square.");
let n = matrix.nrows();
for j in 0..n {
for k in 0..j {
let factor = unsafe { -matrix.get_unchecked((j, k)).clone() };
let (mut col_j, col_k) = matrix.columns_range_pair_mut(j, k);
let mut col_j = col_j.rows_range_mut(j..);
let col_k = col_k.rows_range(j..);
col_j.axpy(factor.conjugate(), &col_k, T::one());
}
let sqrt_denom = |v: T| {
if v.is_zero() {
return None;
}
v.try_sqrt()
};
let diag = unsafe { matrix.get_unchecked((j, j)).clone() };
if let Some(denom) =
sqrt_denom(diag).or_else(|| substitute.clone().and_then(sqrt_denom))
{
unsafe {
*matrix.get_unchecked_mut((j, j)) = denom.clone();
}
let mut col = matrix.view_range_mut(j + 1.., j);
col /= denom;
continue;
}
// The diagonal element is either zero or its square root could not
// be taken (e.g. for negative real numbers).
return None;
}
Some(Cholesky { chol: matrix })
}
/// Given the Cholesky decomposition of a matrix `M`, a scalar `sigma` and a vector `x`,
/// performs a rank one update such that we end up with the decomposition of `M + sigma * (x * x.adjoint())`.
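///
/// # Example
///
/// A sketch checking the update against a decomposition computed from scratch (values assumed):
///
/// ```
/// use nalgebra::{Cholesky, Matrix2, Vector2};
///
/// let m = Matrix2::new(4.0, 2.0,
///                      2.0, 3.0);
/// let x = Vector2::new(1.0, 2.0);
/// let sigma = 0.5;
///
/// let mut chol = Cholesky::new(m).unwrap();
/// chol.rank_one_update(&x, sigma);
///
/// // Should match the decomposition of the explicitly updated matrix.
/// let expected = Cholesky::new(m + x * x.transpose() * sigma).unwrap();
/// assert!((chol.l() - expected.l()).norm() < 1.0e-12);
/// ```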
#[inline]
pub fn rank_one_update<R2: Dim, S2>(&mut self, x: &Vector<T, R2, S2>, sigma: T::RealField)
where
S2: Storage<T, R2, U1>,
DefaultAllocator: Allocator<R2, U1>,
ShapeConstraint: SameNumberOfRows<R2, D>,
{
Self::xx_rank_one_update(&mut self.chol, &mut x.clone_owned(), sigma)
}
/// Updates the decomposition such that we get the decomposition of a matrix with the given column `col` inserted at the `j`th position.
/// Since the matrix is square, the corresponding row is inserted at the `j`th row as well.
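///
/// # Example
///
/// An illustrative sketch with dynamically-sized matrices (values assumed):
///
/// ```
/// use nalgebra::{Cholesky, DMatrix, DVector};
///
/// let m = DMatrix::from_row_slice(2, 2, &[4.0, 2.0,
///                                         2.0, 3.0]);
/// let chol = Cholesky::new(m).unwrap();
///
/// // `col` is the `j`th column of the enlarged matrix; its entry at
/// // index `j` (here 1) is the new diagonal element.
/// let col = DVector::from_vec(vec![1.0, 5.0, 1.0]);
/// let new_chol = chol.insert_column(1, col);
///
/// // Same result as decomposing the enlarged matrix directly.
/// let enlarged = DMatrix::from_row_slice(3, 3, &[4.0, 1.0, 2.0,
///                                                1.0, 5.0, 1.0,
///                                                2.0, 1.0, 3.0]);
/// let expected = Cholesky::new(enlarged).unwrap();
/// assert!((new_chol.l() - expected.l()).norm() < 1.0e-12);
/// ```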
pub fn insert_column<R2, S2>(
&self,
j: usize,
col: Vector<T, R2, S2>,
) -> Cholesky<T, DimSum<D, U1>>
where
D: DimAdd<U1>,
R2: Dim,
S2: Storage<T, R2, U1>,
DefaultAllocator: Allocator<DimSum<D, U1>, DimSum<D, U1>> + Allocator<R2>,
ShapeConstraint: SameNumberOfRows<R2, DimSum<D, U1>>,
{
let mut col = col.into_owned();
// for an explanation of the formulas, see https://en.wikipedia.org/wiki/Cholesky_decomposition#Updating_the_decomposition
let n = col.nrows();
assert_eq!(
n,
self.chol.nrows() + 1,
"The new column must have the size of the factored matrix plus one."
);
assert!(j < n, "j needs to be within the bound of the new matrix.");
// loads the data into a new matrix with an additional jth row/column
// TODO: would it be worth it to avoid the zero-initialization?
let mut chol = Matrix::zeros_generic(
self.chol.shape_generic().0.add(Const::<1>),
self.chol.shape_generic().1.add(Const::<1>),
);
chol.view_range_mut(..j, ..j)
.copy_from(&self.chol.view_range(..j, ..j));
chol.view_range_mut(..j, j + 1..)
.copy_from(&self.chol.view_range(..j, j..));
chol.view_range_mut(j + 1.., ..j)
.copy_from(&self.chol.view_range(j.., ..j));
chol.view_range_mut(j + 1.., j + 1..)
.copy_from(&self.chol.view_range(j.., j..));
// update the jth row
let top_left_corner = self.chol.view_range(..j, ..j);
let col_j = col[j].clone();
let (mut new_rowj_adjoint, mut new_colj) = col.rows_range_pair_mut(..j, j + 1..);
assert!(
top_left_corner.solve_lower_triangular_mut(&mut new_rowj_adjoint),
"Cholesky::insert_column : Unable to solve lower triangular system!"
);
new_rowj_adjoint.adjoint_to(&mut chol.view_range_mut(j, ..j));
// update the center element
let center_element = T::sqrt(col_j - T::from_real(new_rowj_adjoint.norm_squared()));
chol[(j, j)] = center_element.clone();
// update the jth column
let bottom_left_corner = self.chol.view_range(j.., ..j);
// new_colj = (col_jplus - bottom_left_corner * new_rowj.adjoint()) / center_element;
new_colj.gemm(
-T::one() / center_element.clone(),
&bottom_left_corner,
&new_rowj_adjoint,
T::one() / center_element,
);
chol.view_range_mut(j + 1.., j).copy_from(&new_colj);
// update the bottom right corner
let mut bottom_right_corner = chol.view_range_mut(j + 1.., j + 1..);
Self::xx_rank_one_update(
&mut bottom_right_corner,
&mut new_colj,
-T::RealField::one(),
);
Cholesky { chol }
}
/// Updates the decomposition such that we get the decomposition of the factored matrix with its `j`th column removed.
/// Since the matrix is square, the `j`th row will also be removed.
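///
/// # Example
///
/// An illustrative sketch with dynamically-sized matrices (values assumed):
///
/// ```
/// use nalgebra::{Cholesky, DMatrix};
///
/// let m = DMatrix::from_row_slice(3, 3, &[4.0, 2.0, 1.0,
///                                         2.0, 5.0, 0.0,
///                                         1.0, 0.0, 3.0]);
/// let chol = Cholesky::new(m.clone()).unwrap();
///
/// // Removing column/row 1 matches decomposing the shrunken matrix directly.
/// let smaller = chol.remove_column(1);
/// let expected = Cholesky::new(m.remove_row(1).remove_column(1)).unwrap();
/// assert!((smaller.l() - expected.l()).norm() < 1.0e-12);
/// ```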
#[must_use]
pub fn remove_column(&self, j: usize) -> Cholesky<T, DimDiff<D, U1>>
where
D: DimSub<U1>,
DefaultAllocator: Allocator<DimDiff<D, U1>, DimDiff<D, U1>> + Allocator<D>,
{
let n = self.chol.nrows();
assert!(n > 0, "The matrix needs at least one column.");
assert!(j < n, "j needs to be within the bound of the matrix.");
// loads the data into a new matrix except for the jth row/column
// TODO: would it be worth it to avoid this zero initialization?
let mut chol = Matrix::zeros_generic(
self.chol.shape_generic().0.sub(Const::<1>),
self.chol.shape_generic().1.sub(Const::<1>),
);
chol.view_range_mut(..j, ..j)
.copy_from(&self.chol.view_range(..j, ..j));
chol.view_range_mut(..j, j..)
.copy_from(&self.chol.view_range(..j, j + 1..));
chol.view_range_mut(j.., ..j)
.copy_from(&self.chol.view_range(j + 1.., ..j));
chol.view_range_mut(j.., j..)
.copy_from(&self.chol.view_range(j + 1.., j + 1..));
// updates the bottom right corner
let mut bottom_right_corner = chol.view_range_mut(j.., j..);
let mut workspace = self.chol.column(j).clone_owned();
let mut old_colj = workspace.rows_range_mut(j + 1..);
Self::xx_rank_one_update(&mut bottom_right_corner, &mut old_colj, T::RealField::one());
Cholesky { chol }
}
/// Given the Cholesky decomposition of a matrix `M`, a scalar `sigma` and a vector `x`,
/// performs a rank one update such that we end up with the decomposition of `M + sigma * (x * x.adjoint())`.
///
/// This helper method is called by `rank_one_update`, but also by `insert_column` and `remove_column`,
/// where it is used on a square view of the decomposition.
fn xx_rank_one_update<Dm, Sm, Rx, Sx>(
chol: &mut Matrix<T, Dm, Dm, Sm>,
x: &mut Vector<T, Rx, Sx>,
sigma: T::RealField,
) where
//T: ComplexField,
Dm: Dim,
Rx: Dim,
Sm: StorageMut<T, Dm, Dm>,
Sx: StorageMut<T, Rx, U1>,
{
// heavily inspired by Eigen's `llt_rank_update_lower` implementation https://eigen.tuxfamily.org/dox/LLT_8h_source.html
let n = x.nrows();
assert_eq!(
n,
chol.nrows(),
"The input vector must be of the same size as the factorized matrix."
);
let mut beta = crate::one::<T::RealField>();
for j in 0..n {
// updates the diagonal
let diag = T::real(unsafe { chol.get_unchecked((j, j)).clone() });
let diag2 = diag.clone() * diag.clone();
let xj = unsafe { x.get_unchecked(j).clone() };
let sigma_xj2 = sigma.clone() * T::modulus_squared(xj.clone());
let gamma = diag2.clone() * beta.clone() + sigma_xj2.clone();
let new_diag = (diag2.clone() + sigma_xj2.clone() / beta.clone()).sqrt();
unsafe { *chol.get_unchecked_mut((j, j)) = T::from_real(new_diag.clone()) };
beta += sigma_xj2 / diag2;
// updates the terms of L
let mut xjplus = x.rows_range_mut(j + 1..);
let mut col_j = chol.view_range_mut(j + 1.., j);
// xjplus -= (xj / T::from_real(diag)) * col_j;
xjplus.axpy(-xj.clone() / T::from_real(diag.clone()), &col_j, T::one());
if gamma != crate::zero::<T::RealField>() {
// col_j = T::from_real(new_diag / diag) * col_j + (T::from_real(new_diag * sigma / gamma) * T::conjugate(xj)) * xjplus;
col_j.axpy(
T::from_real(new_diag.clone() * sigma.clone() / gamma) * T::conjugate(xj),
&xjplus,
T::from_real(new_diag / diag),
);
}
}
}
}