diff --git a/Cargo.toml b/Cargo.toml
index 0366af9..9cbb46b 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -5,13 +5,14 @@ authors = ["Anders Olsson "]
 edition = "2018"
 
 [dependencies]
+cblas = "0.2"
+derivative = "1.0"
 expm = "0.1"
+lapacke = "0.2"
 ndarray = "0.13"
 ndarray-linalg = { version = "0.12" }
-cblas = "0.2"
-lapacke = "0.2"
-statrs = "0.12"
+openblas-src = { version = "0.8", features = ["static"] }
 ordered-float = "1.0"
 rand = "0.6"
 rand_xoshiro = "0.1"
-openblas-src = { version = "0.8", features = ["static"] }
\ No newline at end of file
+statrs = "0.12"
diff --git a/src/fitter/recursive.rs b/src/fitter/recursive.rs
index 775ab6b..8c5016c 100644
--- a/src/fitter/recursive.rs
+++ b/src/fitter/recursive.rs
@@ -1,3 +1,4 @@
+use derivative::Derivative;
 use ndarray::prelude::*;
 use ndarray::stack;
 use ndarray_linalg::Inverse;
@@ -6,8 +7,11 @@ use crate::kernel::Kernel;
 
 use super::Fitter;
 
+#[derive(Derivative)]
+#[derivative(Debug)]
 pub struct RecursiveFitter {
     ts_new: Vec<f64>,
+    #[derivative(Debug = "ignore")]
     kernel: Box<dyn Kernel>,
     ts: Vec<f64>,
     ms: ArrayD<f64>,
@@ -122,7 +126,7 @@ impl Fitter for RecursiveFitter {
 
     fn fit(&mut self) {
         if !self.is_allocated() {
-            // raise RuntimeError("new data since last call to `allocate()`")
+            panic!("new data since last call to `allocate()`");
         }
 
         if self.ts.is_empty() {
@@ -146,7 +150,7 @@
             let k = Array1::from(k);
 
             self.m_f[i] =
-                (&self.m_p[i] + &k) * (&self.ns[i] - &self.xs[i] * &self.h.dot(&self.m_p[i]));
+                &self.m_p[i] + &(&k * (&self.ns[i] - &self.xs[i] * &self.h.dot(&self.m_p[i])));
 
             // Covariance matrix is computed using the Joseph form.
             let outer = (self.xs[i] * &k)
diff --git a/src/kernel.rs b/src/kernel.rs
index eec4a5d..3b71a1a 100644
--- a/src/kernel.rs
+++ b/src/kernel.rs
@@ -132,6 +132,39 @@ impl Kernel for Vec<Box<dyn Kernel>> {
         feedback
     }
 
+    fn transition(&self, t0: f64, t1: f64) -> Array2<f64> {
+        let data = self
+            .iter()
+            .map(|kernel| kernel.transition(t0, t1))
+            .collect::<Vec<_>>();
+
+        let dim = data
+            .iter()
+            .fold((0, 0), |(w, h), m| (w + m.ncols(), h + m.nrows()));
+
+        let mut transition = Array2::zeros(dim);
+
+        let mut r_d = 0;
+        let mut c_d = 0;
+
+        for m in data {
+            for ((r, c), v) in m.indexed_iter() {
+                transition[(r + r_d, c + c_d)] = *v;
+            }
+
+            r_d += m.nrows();
+            c_d += m.ncols();
+        }
+
+        transition
+    }
+
+    /*
+    def transition(self, t1, t2):
+        mats = [k.transition(t1, t2) for k in self.parts]
+        return block_diag(*mats)
+    */
+
     fn noise_cov(&self, t0: f64, t1: f64) -> Array2<f64> {
         let data = self
             .iter()
diff --git a/src/kernel/exponential.rs b/src/kernel/exponential.rs
index 60391c2..10e1d97 100644
--- a/src/kernel/exponential.rs
+++ b/src/kernel/exponential.rs
@@ -39,7 +39,7 @@ impl Kernel for Exponential {
     }
 
     fn transition(&self, t0: f64, t1: f64) -> Array2<f64> {
-        -(t1 - t0) / self.l_scale * array![[1.0]]
+        (-(t1 - t0) / self.l_scale).exp() * array![[1.0]]
     }
 
     fn noise_cov(&self, t0: f64, t1: f64) -> Array2<f64> {
diff --git a/src/utils.rs b/src/utils.rs
index b574b0a..0092076 100644
--- a/src/utils.rs
+++ b/src/utils.rs
@@ -1,5 +1,7 @@
 use std::f64::consts::PI;
 
+use statrs::function::erf::erfc;
+
 const CS: [f64; 14] = [
     0.00048204,
     -0.00142906,
@@ -36,10 +38,9 @@ const QS: [f64; 6] = [
 
 /// Normal cumulative density function.
 fn normcdf(x: f64) -> f64 {
-    // If X ~ N(0,1), returns P(X < x).
-    // erfc(-x / SQRT2) / 2.0
-    // https://docs.rs/statrs/0.12.0/statrs/function/erf/fn.erfc.html
-    todo!();
+    let SQRT2: f64 = 2.0f64.sqrt();
+
+    erfc(-x / SQRT2) / 2.0
 }
 
 /// Compute the log of the normal cumulative density function.