From fd249da405a23b396cf8557d172ab8b71826f414 Mon Sep 17 00:00:00 2001 From: Anders Olsson Date: Sun, 16 Feb 2020 12:32:14 +0100 Subject: [PATCH] Implement more and more logic. --- src/fitter/recursive.rs | 133 +++++++++++++++++++++++++++++++++++--- src/kernel.rs | 73 ++++++++++++++++++++- src/kernel/constant.rs | 28 +++++++- src/kernel/exponential.rs | 28 +++++++- src/kernel/matern52.rs | 41 +++++++++++- 5 files changed, 287 insertions(+), 16 deletions(-) diff --git a/src/fitter/recursive.rs b/src/fitter/recursive.rs index 4dada67..3902c23 100644 --- a/src/fitter/recursive.rs +++ b/src/fitter/recursive.rs @@ -1,3 +1,8 @@ +use std::mem; + +use ndarray::prelude::*; +use ndarray::stack; + use crate::kernel::Kernel; use super::Fitter; @@ -5,36 +10,144 @@ use super::Fitter; pub struct RecursiveFitter { ts_new: Vec, kernel: Box, - ts: Vec, - ms: Vec, - vs: Vec, - ns: Vec, - xs: Vec, + ts: Vec, + ms: ArrayD, + vs: Array1, + ns: ArrayD, + xs: ArrayD, is_fitted: bool, + h: Array1, + i: ArrayD, + a: ArrayD, + q: ArrayD, + m_p: ArrayD, + p_p: ArrayD, + m_f: ArrayD, + p_f: ArrayD, + m_s: ArrayD, + p_s: ArrayD, } impl RecursiveFitter { pub fn new(kernel: Box) -> Self { + let m = kernel.order(); + let h = kernel.measurement_vector(); + RecursiveFitter { ts_new: Vec::new(), kernel, ts: Vec::new(), - ms: Vec::new(), - vs: Vec::new(), - ns: Vec::new(), - xs: Vec::new(), + ms: Array::zeros(0).into_dyn(), + vs: Array1::zeros(0), + ns: Array::zeros(0).into_dyn(), + xs: Array::zeros(0).into_dyn(), is_fitted: true, + h, + i: Array::eye(m).into_dyn(), + a: Array::zeros((0, m, m)).into_dyn(), + q: Array::zeros((0, m, m)).into_dyn(), + m_p: Array::zeros((0, m)).into_dyn(), + p_p: Array::zeros((0, m, m)).into_dyn(), + m_f: Array::zeros((0, m)).into_dyn(), + p_f: Array::zeros((0, m, m)).into_dyn(), + m_s: Array::zeros((0, m)).into_dyn(), + p_s: Array::zeros((0, m, m)).into_dyn(), } } } impl Fitter for RecursiveFitter { fn add_sample(&mut self, t: f64) -> usize { - todo!(); + let idx = 
self.ts.len() + self.ts_new.len(); + + self.ts_new.push(t); + self.is_fitted = false; + + idx } fn allocate(&mut self) { + let n_new = self.ts_new.len(); + + if n_new == 0 { + return; + } + + // Usual variables. + let zeros = Array::zeros(n_new).into_dyn(); + + self.ts.extend(self.ts_new.iter()); + self.ms = stack![Axis(0), self.ms, zeros]; + self.vs = stack![Axis(0), self.vs, self.kernel.k_diag(&self.ts_new)]; + self.ns = stack![Axis(0), self.ns, zeros]; + self.xs = stack![Axis(0), self.xs, zeros]; + + // Initialize the predictive, filtering and smoothing distributions. + let mean = self + .ts_new + .iter() + .flat_map(|t| self.kernel.state_mean(*t).to_vec().into_iter()) + .collect::>(); + + let mean = Array::from_shape_vec((self.ts_new.len(), self.kernel.order()), mean) + .unwrap() + .into_dyn(); + + let cov = self + .ts_new + .iter() + .flat_map(|t| { + self.kernel + .state_cov(*t) + .iter() + .cloned() + .collect::>() + }) + .collect::>(); + + let cov = Array3::from_shape_vec( + (self.ts_new.len(), self.kernel.order(), self.kernel.order()), + cov, + ) + .unwrap() + .into_dyn(); + + self.m_p = stack![Axis(0), self.m_p, mean]; + self.p_p = stack![Axis(0), self.p_p, cov]; + + self.m_f = stack![Axis(0), self.m_f, mean]; + self.p_f = stack![Axis(0), self.p_f, cov]; + + self.m_s = stack![Axis(0), self.m_s, mean]; + self.p_s = stack![Axis(0), self.p_s, cov]; + + // Compute the new transition and noise covariance matrices. + let m = self.kernel.order(); + + self.a = stack![Axis(0), self.a, Array::zeros((n_new, m, m)).into_dyn()]; + self.q = stack![Axis(0), self.q, Array::zeros((n_new, m, m)).into_dyn()]; + + for i in (self.ts.len() - n_new)..self.ts.len() { + if i == 0 { + continue; + } + + self.a[i - 1] = self.kernel.transition(self.ts[i - 1], self.ts[i]); + // self.q[i - 1] = self.kernel.noise_cov(self.ts[i - 1], self.ts[i]); + } + todo!(); + + /* + for i in range(len(self.ts) - n_new, len(self.ts)): + if i == 0: + # Very first sample, no need to compute anything. 
+ continue + self._A[i-1] = self.kernel.transition(self.ts[i-1], self.ts[i]) + self._Q[i-1] = self.kernel.noise_cov(self.ts[i-1], self.ts[i]) + # Clear the list of pending samples. + self.ts_new = list() + */ } fn fit(&mut self) { diff --git a/src/kernel.rs b/src/kernel.rs index 89ee905..4fa2f78 100644 --- a/src/kernel.rs +++ b/src/kernel.rs @@ -1,3 +1,5 @@ +use ndarray::prelude::*; + mod constant; mod exponential; mod matern52; @@ -6,6 +8,73 @@ pub use constant::Constant; pub use exponential::Exponential; pub use matern52::Matern52; -pub trait Kernel {} +pub trait Kernel { + fn k_diag(&self, ts: &[f64]) -> Array1; + fn order(&self) -> usize; + fn state_mean(&self, t: f64) -> Array1; + fn state_cov(&self, t: f64) -> Array2; + fn measurement_vector(&self) -> Array1; + fn transition(&self, t0: f64, t1: f64) -> Array2; +} -impl Kernel for Vec> {} +impl Kernel for Vec> { + fn k_diag(&self, ts: &[f64]) -> Array1 { + self.iter() + .fold(Array1::zeros(ts.len()), |k_diag: Array1, kernel| { + k_diag + kernel.k_diag(ts) + }) + } + + fn order(&self) -> usize { + self.iter().map(|kernel| kernel.order()).sum() + } + + fn state_mean(&self, t: f64) -> Array1 { + let data = self + .iter() + .flat_map(|kernel| kernel.state_mean(t).to_vec().into_iter()) + .collect::>(); + + Array1::from(data) + } + + fn state_cov(&self, t: f64) -> Array2 { + let data = self + .iter() + .map(|kernel| kernel.state_cov(t)) + .collect::>(); + + let dim = data + .iter() + .fold((0, 0), |(w, h), m| (w + m.ncols(), h + m.nrows())); + + let mut cov = Array2::zeros(dim); + + let mut r_d = 0; + let mut c_d = 0; + + for m in data { + for ((r, c), v) in m.indexed_iter() { + cov[(r + r_d, c + c_d)] = *v; + } + + r_d += m.nrows(); + c_d += m.ncols(); + } + + cov + } + + fn measurement_vector(&self) -> Array1 { + let data = self + .iter() + .flat_map(|kernel| kernel.measurement_vector().to_vec().into_iter()) + .collect::>(); + + Array1::from(data) + } + + fn transition(&self, t0: f64, t1: f64) -> Array2 { + 
todo!(); + } +} diff --git a/src/kernel/constant.rs b/src/kernel/constant.rs index 3b67ae7..ff6db44 100644 --- a/src/kernel/constant.rs +++ b/src/kernel/constant.rs @@ -1,3 +1,5 @@ +use ndarray::prelude::*; + use super::Kernel; pub struct Constant { @@ -10,4 +12,28 @@ impl Constant { } } -impl Kernel for Constant {} +impl Kernel for Constant { + fn k_diag(&self, ts: &[f64]) -> Array1 { + Array1::ones(ts.len()) * self.var + } + + fn order(&self) -> usize { + 1 + } + + fn state_mean(&self, t: f64) -> Array1 { + Array1::zeros(1) + } + + fn state_cov(&self, t: f64) -> Array2 { + array![[1.0]] * self.var + } + + fn measurement_vector(&self) -> Array1 { + array![1.0] + } + + fn transition(&self, t0: f64, t1: f64) -> Array2 { + todo!(); + } +} diff --git a/src/kernel/exponential.rs b/src/kernel/exponential.rs index 2a602c6..3e2b9af 100644 --- a/src/kernel/exponential.rs +++ b/src/kernel/exponential.rs @@ -1,3 +1,5 @@ +use ndarray::prelude::*; + use super::Kernel; pub struct Exponential { @@ -11,4 +13,28 @@ impl Exponential { } } -impl Kernel for Exponential {} +impl Kernel for Exponential { + fn k_diag(&self, ts: &[f64]) -> Array1 { + Array1::ones(ts.len()) * self.var + } + + fn order(&self) -> usize { + 1 + } + + fn state_mean(&self, t: f64) -> Array1 { + Array1::zeros(1) + } + + fn state_cov(&self, t: f64) -> Array2 { + array![[1.0]] * self.var + } + + fn measurement_vector(&self) -> Array1 { + array![1.0] + } + + fn transition(&self, t0: f64, t1: f64) -> Array2 { + todo!(); + } +} diff --git a/src/kernel/matern52.rs b/src/kernel/matern52.rs index 7501220..3c725be 100644 --- a/src/kernel/matern52.rs +++ b/src/kernel/matern52.rs @@ -1,14 +1,51 @@ +use ndarray::prelude::*; + use super::Kernel; pub struct Matern52 { var: f64, l_scale: f64, + lambda: f64, } impl Matern52 { pub fn new(var: f64, l_scale: f64) -> Self { - Matern52 { var, l_scale } + Matern52 { + var, + l_scale, + lambda: 5.0f64.sqrt() / l_scale, + } } } -impl Kernel for Matern52 {} +impl Kernel for Matern52 { 
+ fn k_diag(&self, ts: &[f64]) -> Array1 { + Array1::ones(ts.len()) * self.var + } + + fn order(&self) -> usize { + 3 + } + + fn state_mean(&self, t: f64) -> Array1 { + Array1::zeros(3) + } + + fn state_cov(&self, t: f64) -> Array2 { + let a = self.lambda; + + array![ + [1.0, 0.0, -a * a / 3.0], + [0.0, a * a / 3.0, 0.0], + [-a * a / 3.0, 0.0, a.powi(4)] + ] * self.var + } + + fn measurement_vector(&self) -> Array1 { + array![1.0, 0.0, 0.0] + } + + fn transition(&self, t0: f64, t1: f64) -> Array2 { + todo!(); + } +}