Implement more and more logic.

This commit is contained in:
2020-02-16 12:32:14 +01:00
parent dd5667d82c
commit fd249da405
5 changed files with 287 additions and 16 deletions

View File

@@ -1,3 +1,8 @@
use std::mem;
use ndarray::prelude::*;
use ndarray::stack;
use crate::kernel::Kernel;
use super::Fitter;
@@ -5,36 +10,144 @@ use super::Fitter;
pub struct RecursiveFitter {
ts_new: Vec<f64>,
kernel: Box<dyn Kernel>,
ts: Vec<usize>,
ms: Vec<usize>,
vs: Vec<usize>,
ns: Vec<usize>,
xs: Vec<usize>,
ts: Vec<f64>,
ms: ArrayD<f64>,
vs: Array1<f64>,
ns: ArrayD<f64>,
xs: ArrayD<f64>,
is_fitted: bool,
h: Array1<f64>,
i: ArrayD<f64>,
a: ArrayD<f64>,
q: ArrayD<f64>,
m_p: ArrayD<f64>,
p_p: ArrayD<f64>,
m_f: ArrayD<f64>,
p_f: ArrayD<f64>,
m_s: ArrayD<f64>,
p_s: ArrayD<f64>,
}
impl RecursiveFitter {
    /// Create an empty fitter for the given kernel.
    ///
    /// All per-sample buffers start with zero length along the sample axis;
    /// `allocate` grows them as samples are added. With no samples the model
    /// is trivially fitted, hence `is_fitted: true`.
    pub fn new(kernel: Box<dyn Kernel>) -> Self {
        let m = kernel.order();
        let h = kernel.measurement_vector();
        RecursiveFitter {
            ts_new: Vec::new(),
            kernel,
            ts: Vec::new(),
            ms: Array::zeros(0).into_dyn(),
            vs: Array1::zeros(0),
            ns: Array::zeros(0).into_dyn(),
            xs: Array::zeros(0).into_dyn(),
            is_fitted: true,
            h,
            i: Array::eye(m).into_dyn(),
            a: Array::zeros((0, m, m)).into_dyn(),
            q: Array::zeros((0, m, m)).into_dyn(),
            m_p: Array::zeros((0, m)).into_dyn(),
            p_p: Array::zeros((0, m, m)).into_dyn(),
            m_f: Array::zeros((0, m)).into_dyn(),
            p_f: Array::zeros((0, m, m)).into_dyn(),
            m_s: Array::zeros((0, m)).into_dyn(),
            p_s: Array::zeros((0, m, m)).into_dyn(),
        }
    }
}
impl Fitter for RecursiveFitter {
fn add_sample(&mut self, t: f64) -> usize {
todo!();
let idx = self.ts.len() + self.ts_new.len();
self.ts_new.push(t);
self.is_fitted = false;
idx
}
/// Grow all per-sample buffers to accommodate the pending samples in
/// `ts_new`, initialize their predictive/filtering/smoothing state
/// distributions from the kernel's prior, and compute the transition
/// matrices between consecutive sample times.
fn allocate(&mut self) {
    let n_new = self.ts_new.len();
    if n_new == 0 {
        return;
    }
    // Usual variables.
    let zeros = Array::zeros(n_new).into_dyn();
    self.ts.extend(self.ts_new.iter());
    self.ms = stack![Axis(0), self.ms, zeros];
    self.vs = stack![Axis(0), self.vs, self.kernel.k_diag(&self.ts_new)];
    self.ns = stack![Axis(0), self.ns, zeros];
    self.xs = stack![Axis(0), self.xs, zeros];
    // Initialize the predictive, filtering and smoothing distributions
    // from the kernel's prior state mean and covariance.
    let mean = self
        .ts_new
        .iter()
        .flat_map(|t| self.kernel.state_mean(*t).to_vec().into_iter())
        .collect::<Vec<f64>>();
    let mean = Array::from_shape_vec((self.ts_new.len(), self.kernel.order()), mean)
        .unwrap()
        .into_dyn();
    let cov = self
        .ts_new
        .iter()
        .flat_map(|t| {
            self.kernel
                .state_cov(*t)
                .iter()
                .cloned()
                .collect::<Vec<f64>>()
        })
        .collect::<Vec<f64>>();
    let cov = Array3::from_shape_vec(
        (self.ts_new.len(), self.kernel.order(), self.kernel.order()),
        cov,
    )
    .unwrap()
    .into_dyn();
    self.m_p = stack![Axis(0), self.m_p, mean];
    self.p_p = stack![Axis(0), self.p_p, cov];
    self.m_f = stack![Axis(0), self.m_f, mean];
    self.p_f = stack![Axis(0), self.p_f, cov];
    self.m_s = stack![Axis(0), self.m_s, mean];
    self.p_s = stack![Axis(0), self.p_s, cov];
    // Compute the new transition and noise covariance matrices.
    let m = self.kernel.order();
    self.a = stack![Axis(0), self.a, Array::zeros((n_new, m, m)).into_dyn()];
    self.q = stack![Axis(0), self.q, Array::zeros((n_new, m, m)).into_dyn()];
    for i in (self.ts.len() - n_new)..self.ts.len() {
        if i == 0 {
            // Very first sample, no predecessor to transition from.
            continue;
        }
        // A[i-1] takes the state from ts[i-1] to ts[i].
        self.a[i - 1] = self.kernel.transition(self.ts[i - 1], self.ts[i]);
        // TODO: enable once `noise_cov` is added to the `Kernel` trait.
        // self.q[i - 1] = self.kernel.noise_cov(self.ts[i - 1], self.ts[i]);
    }
    // Clear the list of pending samples.
    self.ts_new.clear();
}
fn fit(&mut self) {

View File

@@ -1,3 +1,5 @@
use ndarray::prelude::*;
mod constant;
mod exponential;
mod matern52;
@@ -6,6 +8,73 @@ pub use constant::Constant;
pub use exponential::Exponential;
pub use matern52::Matern52;
pub trait Kernel {}
pub trait Kernel {
fn k_diag(&self, ts: &[f64]) -> Array1<f64>;
fn order(&self) -> usize;
fn state_mean(&self, t: f64) -> Array1<f64>;
fn state_cov(&self, t: f64) -> Array2<f64>;
fn measurement_vector(&self) -> Array1<f64>;
fn transition(&self, t0: f64, t1: f64) -> Array2<f64>;
}
/// Assemble a block-diagonal matrix from a sequence of square blocks.
fn block_diag(blocks: &[Array2<f64>]) -> Array2<f64> {
    let dim = blocks
        .iter()
        .fold((0, 0), |(r, c), m| (r + m.nrows(), c + m.ncols()));
    let mut out = Array2::zeros(dim);
    let mut r_d = 0;
    let mut c_d = 0;
    for m in blocks {
        for ((r, c), v) in m.indexed_iter() {
            out[(r + r_d, c + c_d)] = *v;
        }
        r_d += m.nrows();
        c_d += m.ncols();
    }
    out
}

/// A sum of kernels: per-sample variances add up, latent states are
/// concatenated, and state matrices are block-diagonal.
impl Kernel for Vec<Box<dyn Kernel>> {
    fn k_diag(&self, ts: &[f64]) -> Array1<f64> {
        self.iter()
            .fold(Array1::zeros(ts.len()), |k_diag: Array1<f64>, kernel| {
                k_diag + kernel.k_diag(ts)
            })
    }
    fn order(&self) -> usize {
        self.iter().map(|kernel| kernel.order()).sum()
    }
    fn state_mean(&self, t: f64) -> Array1<f64> {
        let data = self
            .iter()
            .flat_map(|kernel| kernel.state_mean(t).to_vec().into_iter())
            .collect::<Vec<f64>>();
        Array1::from(data)
    }
    fn state_cov(&self, t: f64) -> Array2<f64> {
        // Component states are independent, so the joint covariance is
        // block-diagonal in the components' covariances.
        let blocks = self
            .iter()
            .map(|kernel| kernel.state_cov(t))
            .collect::<Vec<_>>();
        block_diag(&blocks)
    }
    fn measurement_vector(&self) -> Array1<f64> {
        let data = self
            .iter()
            .flat_map(|kernel| kernel.measurement_vector().to_vec().into_iter())
            .collect::<Vec<f64>>();
        Array1::from(data)
    }
    fn transition(&self, t0: f64, t1: f64) -> Array2<f64> {
        // Each component's state evolves independently, so the joint
        // transition matrix is block-diagonal, like `state_cov`.
        let blocks = self
            .iter()
            .map(|kernel| kernel.transition(t0, t1))
            .collect::<Vec<_>>();
        block_diag(&blocks)
    }
}

View File

@@ -1,3 +1,5 @@
use ndarray::prelude::*;
use super::Kernel;
pub struct Constant {
@@ -10,4 +12,28 @@ impl Constant {
}
}
/// Constant kernel: k(t, t') = var for all t, t'. Its one-dimensional
/// latent state never changes over time.
impl Kernel for Constant {
    fn k_diag(&self, ts: &[f64]) -> Array1<f64> {
        Array1::ones(ts.len()) * self.var
    }
    fn order(&self) -> usize {
        1
    }
    fn state_mean(&self, _t: f64) -> Array1<f64> {
        Array1::zeros(1)
    }
    fn state_cov(&self, _t: f64) -> Array2<f64> {
        array![[1.0]] * self.var
    }
    fn measurement_vector(&self) -> Array1<f64> {
        array![1.0]
    }
    fn transition(&self, _t0: f64, _t1: f64) -> Array2<f64> {
        // A constant process carries its state over unchanged: A = I.
        array![[1.0]]
    }
}

View File

@@ -1,3 +1,5 @@
use ndarray::prelude::*;
use super::Kernel;
pub struct Exponential {
@@ -11,4 +13,28 @@ impl Exponential {
}
}
/// Exponential (Ornstein–Uhlenbeck) kernel with a one-dimensional
/// latent state.
impl Kernel for Exponential {
    fn k_diag(&self, ts: &[f64]) -> Array1<f64> {
        Array1::ones(ts.len()) * self.var
    }
    fn order(&self) -> usize {
        1
    }
    fn state_mean(&self, _t: f64) -> Array1<f64> {
        Array1::zeros(1)
    }
    fn state_cov(&self, _t: f64) -> Array2<f64> {
        array![[1.0]] * self.var
    }
    fn measurement_vector(&self) -> Array1<f64> {
        array![1.0]
    }
    fn transition(&self, _t0: f64, _t1: f64) -> Array2<f64> {
        // Needs the kernel's length-scale (field not visible here); the
        // OU transition decays the state over the elapsed time.
        todo!();
    }
}

View File

@@ -1,14 +1,51 @@
use ndarray::prelude::*;
use super::Kernel;
/// Matérn 5/2 kernel in state-space form (order-3 latent state).
pub struct Matern52 {
    // Kernel variance k(t, t).
    var: f64,
    // Length-scale of the kernel.
    l_scale: f64,
    // Precomputed decay rate: lambda = sqrt(5) / l_scale, used by the
    // state-space matrices.
    lambda: f64,
}

impl Matern52 {
    /// Create a Matérn 5/2 kernel with variance `var` and length-scale
    /// `l_scale`.
    pub fn new(var: f64, l_scale: f64) -> Self {
        Matern52 {
            var,
            l_scale,
            lambda: 5.0f64.sqrt() / l_scale,
        }
    }
}
/// Matérn 5/2 kernel: twice-differentiable process tracked by a
/// three-dimensional latent state (value and two derivatives).
impl Kernel for Matern52 {
    fn k_diag(&self, ts: &[f64]) -> Array1<f64> {
        Array1::ones(ts.len()) * self.var
    }
    fn order(&self) -> usize {
        3
    }
    fn state_mean(&self, _t: f64) -> Array1<f64> {
        Array1::zeros(3)
    }
    fn state_cov(&self, _t: f64) -> Array2<f64> {
        // Stationary state covariance, expressed via lambda = sqrt(5)/l.
        let a = self.lambda;
        array![
            [1.0, 0.0, -a * a / 3.0],
            [0.0, a * a / 3.0, 0.0],
            [-a * a / 3.0, 0.0, a.powi(4)]
        ] * self.var
    }
    fn measurement_vector(&self) -> Array1<f64> {
        // Only the first state component (the process value) is observed.
        array![1.0, 0.0, 0.0]
    }
    fn transition(&self, _t0: f64, _t1: f64) -> Array2<f64> {
        // Matrix exponential of the companion form over dt = t1 - t0;
        // not implemented yet.
        todo!();
    }
}