OMG, I'm close now!
@@ -1,3 +1,4 @@
+use derivative::Derivative;
 use ndarray::prelude::*;
 use ndarray::stack;
 use ndarray_linalg::Inverse;
@@ -6,8 +7,11 @@ use crate::kernel::Kernel;
 
 use super::Fitter;
 
+#[derive(Derivative)]
+#[derivative(Debug)]
 pub struct RecursiveFitter {
     ts_new: Vec<f64>,
+    #[derivative(Debug = "ignore")]
     kernel: Box<dyn Kernel>,
     ts: Vec<f64>,
     ms: ArrayD<f64>,
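
The new `derivative` import and attributes let `RecursiveFitter` derive `Debug` even though `Box<dyn Kernel>` has no `Debug` bound. Below is a minimal sketch of the same pattern outside this repository; the `Shape`, `Circle` and `Holder` names are illustrative, and the `derivative` crate is assumed to be listed in Cargo.toml.

use derivative::Derivative;

// Hypothetical trait standing in for `Kernel`; note it has no `Debug` bound.
trait Shape {
    fn area(&self) -> f64;
}

struct Circle {
    radius: f64,
}

impl Shape for Circle {
    fn area(&self) -> f64 {
        std::f64::consts::PI * self.radius * self.radius
    }
}

#[derive(Derivative)]
#[derivative(Debug)]
struct Holder {
    label: String,
    // Skipped when formatting, so the missing `Debug` impl on `dyn Shape` is fine.
    #[derivative(Debug = "ignore")]
    shape: Box<dyn Shape>,
}

fn main() {
    let h = Holder {
        label: "unit circle".to_string(),
        shape: Box::new(Circle { radius: 1.0 }),
    };
    // Prints roughly `Holder { label: "unit circle" }`, without the shape field.
    println!("{:?}", h);
    println!("area = {}", h.shape.area());
}
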
@@ -122,7 +126,7 @@ impl Fitter for RecursiveFitter {
 
     fn fit(&mut self) {
         if !self.is_allocated() {
-            // raise RuntimeError("new data since last call to `allocate()`")
+            panic!("new data since last call to `allocate()`");
         }
 
         if self.ts.is_empty() {
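
The leftover Python-style `raise RuntimeError` comment becomes a real `panic!`, enforcing the allocate-before-fit protocol. A rough sketch of that protocol follows; `ToyFitter`, `pending` and `capacity` are made-up names, not the real state of `RecursiveFitter`.

// Rough sketch of the allocate-then-fit guard, under the assumption that
// "allocated" means no observations were added since the last allocate().
struct ToyFitter {
    pending: Vec<f64>, // observations added so far
    capacity: usize,   // how many observations the buffers were sized for
}

impl ToyFitter {
    fn add(&mut self, t: f64) {
        self.pending.push(t);
    }

    fn is_allocated(&self) -> bool {
        self.pending.len() <= self.capacity
    }

    fn allocate(&mut self) {
        // A real implementation would resize its arrays here.
        self.capacity = self.pending.len();
    }

    fn fit(&mut self) {
        if !self.is_allocated() {
            panic!("new data since last call to `allocate()`");
        }
        // ... run the recursive filtering pass over the allocated buffers ...
    }
}

fn main() {
    let mut f = ToyFitter { pending: vec![], capacity: 0 };
    f.add(1.0);
    f.allocate();
    f.fit(); // fine: allocate() ran after the last add()
    f.add(2.0);
    // Calling f.fit() here would panic: new data since the last allocate().
    f.allocate();
    f.fit();
}
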
@@ -146,7 +150,7 @@ impl Fitter for RecursiveFitter {
             let k = Array1::from(k);
 
             self.m_f[i] =
-                (&self.m_p[i] + &k) * (&self.ns[i] - &self.xs[i] * &self.h.dot(&self.m_p[i]));
+                &self.m_p[i] + &(&k * (&self.ns[i] - &self.xs[i] * &self.h.dot(&self.m_p[i])));
 
             // Covariance matrix is computed using the Joseph form.
             let outer = (self.xs[i] * &k)
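
The rewritten line now has the shape of the standard Kalman mean update, m_f = m_p + k · (innovation), instead of multiplying (m_p + k) by the innovation, and the following comment refers to the Joseph form for the covariance. Here is a self-contained sketch of one such update with ndarray; the argument names (m_p, p_p, h, y, r) are illustrative, only the structure mirrors the update in `fit()`.

use ndarray::prelude::*;

// One scalar-observation Kalman update with a Joseph-form covariance step.
fn kalman_update(
    m_p: &Array1<f64>, // predicted mean
    p_p: &Array2<f64>, // predicted covariance
    h: &Array1<f64>,   // observation vector
    y: f64,            // observed value
    r: f64,            // observation noise variance
) -> (Array1<f64>, Array2<f64>) {
    let s = h.dot(&p_p.dot(h)) + r; // innovation variance
    let k = p_p.dot(h) / s;         // Kalman gain
    // Mean update: m_f = m_p + k * (y - h . m_p), the same shape as the fixed line.
    let m_f = m_p + &(&k * (y - h.dot(m_p)));

    // Joseph form: (I - k h) P (I - k h)^T + r k k^T, which stays symmetric
    // and positive semi-definite under rounding error.
    let k_col = k.clone().insert_axis(Axis(1)); // n x 1
    let h_row = h.clone().insert_axis(Axis(0)); // 1 x n
    let a = Array2::<f64>::eye(m_p.len()) - k_col.dot(&h_row);
    let p_f = a.dot(p_p).dot(&a.t()) + k_col.dot(&k_col.t()) * r;
    (m_f, p_f)
}

fn main() {
    let m_p = array![0.0, 0.0];
    let p_p = array![[1.0, 0.0], [0.0, 1.0]];
    let h = array![1.0, 0.0];
    let (m_f, p_f) = kalman_update(&m_p, &p_p, &h, 0.5, 0.1);
    println!("m_f = {}\np_f = {}", m_f, p_f);
}
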
@@ -132,6 +132,39 @@ impl Kernel for Vec<Box<dyn Kernel>> {
         feedback
     }
 
+    fn transition(&self, t0: f64, t1: f64) -> Array2<f64> {
+        let data = self
+            .iter()
+            .map(|kernel| kernel.transition(t0, t1))
+            .collect::<Vec<_>>();
+
+        let dim = data
+            .iter()
+            .fold((0, 0), |(w, h), m| (w + m.ncols(), h + m.nrows()));
+
+        let mut transition = Array2::zeros(dim);
+
+        let mut r_d = 0;
+        let mut c_d = 0;
+
+        for m in data {
+            for ((r, c), v) in m.indexed_iter() {
+                transition[(r + r_d, c + c_d)] = *v;
+            }
+
+            r_d += m.nrows();
+            c_d += m.ncols();
+        }
+
+        transition
+    }
+
+    /*
+    def transition(self, t1, t2):
+        mats = [k.transition(t1, t2) for k in self.parts]
+        return block_diag(*mats)
+    */
+
     fn noise_cov(&self, t0: f64, t1: f64) -> Array2<f64> {
         let data = self
             .iter()
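
The composite kernel's `transition` places each sub-kernel's transition matrix along the diagonal, mirroring the `block_diag` call in the Python comment. A stand-alone sketch of the same placement logic follows; the name `block_diag` is illustrative, ndarray itself does not provide one.

use ndarray::prelude::*;

// Free-standing block-diagonal helper with the same offset bookkeeping as the
// `transition` impl above.
fn block_diag(blocks: &[Array2<f64>]) -> Array2<f64> {
    let rows: usize = blocks.iter().map(|m| m.nrows()).sum();
    let cols: usize = blocks.iter().map(|m| m.ncols()).sum();
    let mut out = Array2::<f64>::zeros((rows, cols));

    let (mut r_d, mut c_d) = (0, 0);
    for m in blocks {
        // Copy each block at the running diagonal offset.
        out.slice_mut(s![r_d..r_d + m.nrows(), c_d..c_d + m.ncols()])
            .assign(m);
        r_d += m.nrows();
        c_d += m.ncols();
    }
    out
}

fn main() {
    let a = array![[1.0]];
    let b = array![[2.0, 3.0], [4.0, 5.0]];
    // Expected result:
    // [[1, 0, 0],
    //  [0, 2, 3],
    //  [0, 4, 5]]
    println!("{}", block_diag(&[a, b]));
}
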
@@ -39,7 +39,7 @@ impl Kernel for Exponential {
     }
 
     fn transition(&self, t0: f64, t1: f64) -> Array2<f64> {
-        -(t1 - t0) / self.l_scale * array![[1.0]]
+        (-(t1 - t0) / self.l_scale).exp() * array![[1.0]]
     }
 
     fn noise_cov(&self, t0: f64, t1: f64) -> Array2<f64> {
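
The one-line fix applies the exponential: for the exponential kernel the 1×1 state-transition matrix over a gap Δt = t1 − t0 is exp(−Δt / l_scale), whereas the old line returned the exponent itself. A quick stand-alone check of the fixed expression, with `l_scale` passed in directly and the `Exponential` struct and `Kernel` trait omitted:

use ndarray::prelude::*;

// Stand-alone version of the fixed transition.
fn exp_transition(l_scale: f64, t0: f64, t1: f64) -> Array2<f64> {
    (-(t1 - t0) / l_scale).exp() * array![[1.0]]
}

fn main() {
    let l_scale = 2.0;
    // A zero gap means no decay: the 1x1 transition matrix is the identity.
    assert_eq!(exp_transition(l_scale, 3.0, 3.0), array![[1.0]]);
    // One length-scale later the state has decayed by a factor exp(-1).
    let a = exp_transition(l_scale, 0.0, 2.0);
    assert!((a[(0, 0)] - (-1.0f64).exp()).abs() < 1e-12);
    println!("transition over one length-scale: {}", a);
}
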
@@ -1,5 +1,7 @@
 use std::f64::consts::PI;
 
+use statrs::function::erf::erfc;
+
 const CS: [f64; 14] = [
     0.00048204,
     -0.00142906,
@@ -36,10 +38,9 @@ const QS: [f64; 6] = [
 
 /// Normal cumulative density function.
 fn normcdf(x: f64) -> f64 {
-    // If X ~ N(0,1), returns P(X < x).
-    // erfc(-x / SQRT2) / 2.0
-    // https://docs.rs/statrs/0.12.0/statrs/function/erf/fn.erfc.html
-    todo!();
+    let SQRT2: f64 = 2.0f64.sqrt();
+
+    erfc(-x / SQRT2) / 2.0
 }
 
 /// Compute the log of the normal cumulative density function.
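
The `todo!()` body is replaced with the identity P(X < x) = erfc(−x / √2) / 2 via `statrs::function::erf::erfc` (the docs link removed above). A small sanity-check sketch of the same formula, assuming statrs is available, not part of the commit:

use statrs::function::erf::erfc;

/// Normal cumulative density function: if X ~ N(0, 1), returns P(X < x).
fn normcdf(x: f64) -> f64 {
    erfc(-x / std::f64::consts::SQRT_2) / 2.0
}

fn main() {
    // By symmetry the standard normal CDF is exactly 1/2 at zero.
    assert!((normcdf(0.0) - 0.5).abs() < 1e-12);
    // P(X < 1.96) is roughly 0.975, the familiar two-sided 95% bound.
    assert!((normcdf(1.96) - 0.975).abs() < 1e-3);
    // The two tails sum to one.
    assert!((normcdf(-1.0) + normcdf(1.0) - 1.0).abs() < 1e-12);
    println!("normcdf(1.96) = {}", normcdf(1.96));
}
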