OMG, I'm close now!

2020-02-20 14:36:44 +01:00
parent 6840699144
commit 9ac7e44776
5 changed files with 50 additions and 11 deletions

@@ -5,13 +5,14 @@ authors = ["Anders Olsson <anders.e.olsson@gmail.com>"]
 edition = "2018"
 
 [dependencies]
+cblas = "0.2"
+derivative = "1.0"
 expm = "0.1"
+lapacke = "0.2"
 ndarray = "0.13"
 ndarray-linalg = { version = "0.12" }
-cblas = "0.2"
-lapacke = "0.2"
-statrs = "0.12"
+openblas-src = { version = "0.8", features = ["static"] }
 ordered-float = "1.0"
 rand = "0.6"
 rand_xoshiro = "0.1"
-openblas-src = { version = "0.8", features = ["static"] }
+statrs = "0.12"

@@ -1,3 +1,4 @@
+use derivative::Derivative;
 use ndarray::prelude::*;
 use ndarray::stack;
 use ndarray_linalg::Inverse;
@@ -6,8 +7,11 @@ use crate::kernel::Kernel;
 use super::Fitter;
 
+#[derive(Derivative)]
+#[derivative(Debug)]
 pub struct RecursiveFitter {
     ts_new: Vec<f64>,
+    #[derivative(Debug = "ignore")]
     kernel: Box<dyn Kernel>,
     ts: Vec<f64>,
     ms: ArrayD<f64>,
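
Aside on the new `derivative` dependency: `Box<dyn Kernel>` carries no `Debug` bound, so a plain `#[derive(Debug)]` on `RecursiveFitter` would not compile. The `derivative` crate's configurable derive produces a `Debug` impl that simply skips that field. A minimal standalone sketch of the same pattern (the `Handler`/`Thing` names are invented for illustration):

use derivative::Derivative;

// Stand-in for a trait object without a Debug bound, like Box<dyn Kernel>.
trait Handler {}
struct NoOp;
impl Handler for NoOp {}

#[derive(Derivative)]
#[derivative(Debug)]
struct Thing {
    name: String,
    #[derivative(Debug = "ignore")] // omitted from the derived Debug output
    handler: Box<dyn Handler>,
}

fn main() {
    let t = Thing { name: "demo".into(), handler: Box::new(NoOp) };
    println!("{:?}", t); // prints: Thing { name: "demo" }
}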
@@ -122,7 +126,7 @@ impl Fitter for RecursiveFitter {
     fn fit(&mut self) {
         if !self.is_allocated() {
-            // raise RuntimeError("new data since last call to `allocate()`")
+            panic!("new data since last call to `allocate()`");
         }
         if self.ts.is_empty() {
@@ -146,7 +150,7 @@ impl Fitter for RecursiveFitter {
             let k = Array1::from(k);
             self.m_f[i] =
-                (&self.m_p[i] + &k) * (&self.ns[i] - &self.xs[i] * &self.h.dot(&self.m_p[i]));
+                &self.m_p[i] + &(&k * (&self.ns[i] - &self.xs[i] * &self.h.dot(&self.m_p[i])));
             // Covariance matrix is computed using the Joseph form.
             let outer = (self.xs[i] * &k)
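
The corrected assignment matches the standard Kalman filter mean update, m_f = m_p + K * (y − H·m_p): the gain-weighted innovation is added to the prior mean. The old expression instead multiplied (prior + gain) by the innovation. A scalar sketch with made-up numbers to show the two expressions really differ:

fn main() {
    // Scalar stand-ins for one entry of the update (values are invented).
    let m_p = 1.0_f64;        // prior mean, m_p[i]
    let k = 0.4_f64;          // Kalman gain entry
    let innovation = 2.0_f64; // ns[i] - xs[i] * h.dot(&m_p[i])

    let old_expr = (m_p + k) * innovation; // 2.8: scales the prior by the innovation
    let new_expr = m_p + k * innovation;   // 1.8: prior plus gain-weighted innovation
    println!("old: {}, new: {}", old_expr, new_expr);
}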

@@ -132,6 +132,39 @@ impl Kernel for Vec<Box<dyn Kernel>> {
         feedback
     }
 
+    fn transition(&self, t0: f64, t1: f64) -> Array2<f64> {
+        let data = self
+            .iter()
+            .map(|kernel| kernel.transition(t0, t1))
+            .collect::<Vec<_>>();
+        let dim = data
+            .iter()
+            .fold((0, 0), |(w, h), m| (w + m.ncols(), h + m.nrows()));
+        let mut transition = Array2::zeros(dim);
+        let mut r_d = 0;
+        let mut c_d = 0;
+        for m in data {
+            for ((r, c), v) in m.indexed_iter() {
+                transition[(r + r_d, c + c_d)] = *v;
+            }
+            r_d += m.nrows();
+            c_d += m.ncols();
+        }
+        transition
+    }
+
+    /*
+    def transition(self, t1, t2):
+        mats = [k.transition(t1, t2) for k in self.parts]
+        return block_diag(*mats)
+    */
+
     fn noise_cov(&self, t0: f64, t1: f64) -> Array2<f64> {
         let data = self
             .iter()
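
The new `transition` for a composite kernel lays the per-kernel transition matrices along the diagonal of one larger matrix, which is what the commented-out Python reference does with `block_diag`. A self-contained sketch of the same assembly using only `ndarray` (the free function `block_diag` below is illustrative, not part of the crate):

use ndarray::{array, Array2};

// Hand-rolled block-diagonal assembly: each matrix occupies its own diagonal block.
fn block_diag(blocks: &[Array2<f64>]) -> Array2<f64> {
    let rows: usize = blocks.iter().map(|m| m.nrows()).sum();
    let cols: usize = blocks.iter().map(|m| m.ncols()).sum();
    let mut out = Array2::zeros((rows, cols));
    let (mut r0, mut c0) = (0, 0);
    for m in blocks {
        for ((r, c), v) in m.indexed_iter() {
            out[(r + r0, c + c0)] = *v;
        }
        r0 += m.nrows();
        c0 += m.ncols();
    }
    out
}

fn main() {
    let a = array![[1.0]];
    let b = array![[2.0, 3.0], [4.0, 5.0]];
    // Expected:
    // [[1, 0, 0],
    //  [0, 2, 3],
    //  [0, 4, 5]]
    println!("{}", block_diag(&[a, b]));
}

One detail worth noting: the committed fold accumulates `dim` as (total columns, total rows); transition blocks map a state to itself and are square, so this coincides with the (rows, columns) shape that `Array2::zeros` expects.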

@@ -39,7 +39,7 @@ impl Kernel for Exponential {
     }
 
     fn transition(&self, t0: f64, t1: f64) -> Array2<f64> {
-        -(t1 - t0) / self.l_scale * array![[1.0]]
+        (-(t1 - t0) / self.l_scale).exp() * array![[1.0]]
     }
 
     fn noise_cov(&self, t0: f64, t1: f64) -> Array2<f64> {
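
The fix here adds the missing `.exp()`: over a gap Δt = t1 − t0, the exponential kernel's 1×1 state transition should be e^(−Δt / l_scale), which decays from 1 toward 0 as the gap grows, rather than the raw ratio −Δt / l_scale. A small check using a free-standing copy of the corrected expression (the helper below is illustrative only):

use ndarray::{array, Array2};

// Corrected transition of an exponential kernel with length scale `l_scale`.
fn transition(l_scale: f64, t0: f64, t1: f64) -> Array2<f64> {
    (-(t1 - t0) / l_scale).exp() * array![[1.0]]
}

fn main() {
    let l = 2.0;
    println!("{}", transition(l, 0.0, 0.0));  // [[1.0]]: no gap, state carried over unchanged
    println!("{}", transition(l, 0.0, 2.0));  // [[0.3679]]: e^(-1)
    println!("{}", transition(l, 0.0, 20.0)); // [[~0.0]]: long gaps forget the state
}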

@@ -1,5 +1,7 @@
 use std::f64::consts::PI;
 
+use statrs::function::erf::erfc;
+
 const CS: [f64; 14] = [
     0.00048204,
     -0.00142906,
@@ -36,10 +38,9 @@ const QS: [f64; 6] = [
 /// Normal cumulative density function.
 fn normcdf(x: f64) -> f64 {
-    // If X ~ N(0,1), returns P(X < x).
-    // erfc(-x / SQRT2) / 2.0
-    // https://docs.rs/statrs/0.12.0/statrs/function/erf/fn.erfc.html
-    todo!();
+    let SQRT2: f64 = 2.0f64.sqrt();
+    erfc(-x / SQRT2) / 2.0
 }
 
 /// Compute the log of the normal cumulative density function.
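
`normcdf` is now implemented through the identity Φ(x) = erfc(−x / √2) / 2, using `erfc` from `statrs` exactly as the removed comment (and the linked statrs docs) suggested, instead of the `todo!()` stub. A quick self-contained sanity check mirroring the committed function:

use statrs::function::erf::erfc;

/// If X ~ N(0, 1), returns P(X < x) via the complementary error function.
fn normcdf(x: f64) -> f64 {
    let sqrt2 = 2.0_f64.sqrt();
    erfc(-x / sqrt2) / 2.0
}

fn main() {
    println!("{:.4}", normcdf(0.0));   // 0.5000: the standard normal is symmetric about 0
    println!("{:.4}", normcdf(1.96));  // ~0.9750: the familiar two-sided 95% quantile
    println!("{:.4}", normcdf(-1.96)); // ~0.0250
}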