Fixed a bug, and added a custom solve function (using lapacke).

2020-02-24 11:51:26 +01:00
parent eae980a74c
commit 9b79722984
8 changed files with 144 additions and 18 deletions


@@ -5,17 +5,20 @@ authors = ["Anders Olsson <anders.e.olsson@gmail.com>"]
 edition = "2018"
 
 [dependencies]
+blas = "0.20"
 cblas = "0.2"
 derivative = "1.0"
 expm = "0.1"
+lapack = "0.16"
 lapacke = "0.2"
 ndarray = "0.13"
 ndarray-linalg = { version = "0.12" }
-openblas-src = { version = "0.8", features = ["static"] }
 ordered-float = "1.0"
 rand = "0.7"
 rand_xoshiro = "0.4"
 statrs = "0.12"
 
 [dev-dependencies]
+# openblas-src = { version = "0.8", features = ["static"] }
+intel-mkl-src = "0.5"
 time = "0.2"

examples/abcdef.rs (new file, 66 lines)

@@ -0,0 +1,66 @@
+extern crate intel_mkl_src;
+
+use kickscore as ks;
+
+fn main() {
+    let mut model = ks::BinaryModel::new(ks::BinaryModelObservation::Probit);
+    for player in &["A", "B", "C", "D", "E", "F"] {
+        let kernel: Vec<Box<dyn ks::Kernel>> = vec![
+            Box::new(ks::kernel::Constant::new(1.0)),
+            Box::new(ks::kernel::Matern52::new(0.5, 1.0)),
+        ];
+        model.add_item(player, Box::new(kernel));
+    }
+
+    model.observe(&["A"], &["B"], 0.0);
+    model.fit();
+    for player in &["A", "B", "C", "D", "E", "F"] {
+        let (mu, sigma) = model.item_score(player, 1.25);
+        println!("{}: mu={} sigma={}", player, mu, sigma);
+    }
+
+    model.observe(&["C"], &["D"], 0.25);
+    model.fit();
+    for player in &["A", "B", "C", "D", "E", "F"] {
+        let (mu, sigma) = model.item_score(player, 1.25);
+        println!("{}: mu={} sigma={}", player, mu, sigma);
+    }
+
+    model.observe(&["E"], &["F"], 0.50);
+    model.fit();
+    for player in &["A", "B", "C", "D", "E", "F"] {
+        let (mu, sigma) = model.item_score(player, 1.25);
+        println!("{}: mu={} sigma={}", player, mu, sigma);
+    }
+
+    model.observe(&["B"], &["C"], 0.75);
+    model.fit();
+    for player in &["A", "B", "C", "D", "E", "F"] {
+        let (mu, sigma) = model.item_score(player, 1.25);
+        println!("{}: mu={} sigma={}", player, mu, sigma);
+    }
+
+    model.observe(&["D"], &["E"], 1.00);
+    model.fit();
+    for player in &["A", "B", "C", "D", "E", "F"] {
+        let (mu, sigma) = model.item_score(player, 1.25);
+        println!("{}: mu={} sigma={}", player, mu, sigma);
+    }
+}


@@ -1,4 +1,4 @@
-extern crate openblas_src;
+extern crate intel_mkl_src;
 
 use kickscore as ks;


@@ -1,4 +1,4 @@
-extern crate openblas_src;
+extern crate intel_mkl_src;
 
 use std::collections::HashSet;
 use std::fs;


@@ -1,7 +1,6 @@
 use derivative::Derivative;
 use ndarray::prelude::*;
 use ndarray::stack;
-use ndarray_linalg::Inverse;
 
 use crate::kernel::Kernel;
@@ -101,22 +100,22 @@ impl Fitter for RecursiveFitter {
         }
 
         // Compute the new transition and noise covariance matrices.
+        let m = self.kernel.order();
+        for _ in 0..n_new {
+            self.a.push(Array2::zeros((m, m)));
+            self.q.push(Array2::zeros((m, m)));
+        }
         for i in (self.ts.len() - n_new)..self.ts.len() {
             if i == 0 {
                 continue;
             }
-            self.a
-                .push(self.kernel.transition(self.ts[i - 1], self.ts[i]));
-            self.q
-                .push(self.kernel.noise_cov(self.ts[i - 1], self.ts[i]));
+            self.a[i - 1] = self.kernel.transition(self.ts[i - 1], self.ts[i]);
+            self.q[i - 1] = self.kernel.noise_cov(self.ts[i - 1], self.ts[i]);
         }
-        let m = self.kernel.order();
-        self.a.push(Array2::zeros((m, m)));
-        self.q.push(Array2::zeros((m, m)));
         self.ts_new.clear();
     }
@@ -180,9 +179,17 @@ impl Fitter for RecursiveFitter {
                 self.m_s[i] = self.m_f[i].clone();
                 self.p_s[i] = self.p_f[i].clone();
             } else {
+                let a = self.p_p[i + 1].clone();
+                let b = self.a[i].dot(&self.p_f[i]);
+                // println!("a={:#?}", a);
+                let g = crate::linalg::solve(a, b);
+                let g = g.t();
+                /*
                 let g = self.a[i]
                     .dot(&self.p_f[i])
                     .dot(&self.p_p[i + 1].inv().expect("failed to inverse matrix"));
+                */
                 self.m_s[i] = &self.m_f[i] + &g.dot(&(&self.m_s[i + 1] - &self.m_p[i + 1]));
                 self.p_s[i] =
@@ -248,11 +255,10 @@ impl Fitter for RecursiveFitter {
                 // RTS update using the right neighbor.
                 let a = self.kernel.transition(ts[i], self.ts[(j + 1) as usize]);
-                let g = a.dot(&p).dot(
-                    &self.p_p[(j + 1) as usize]
-                        .inv()
-                        .expect("failed to inverse matrix"),
-                );
+                let a = a.dot(&p);
+                let b = self.p_p[(j + 1) as usize].clone();
+                let g = crate::linalg::solve(a, b);
+                let g = g.t();
                 ms.push(self.h.dot(
                     &(&m + &g.dot(&(&self.m_s[(j + 1) as usize] - &self.m_p[(j + 1) as usize]))),
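Aside (not part of the commit): both fitter hunks replace an explicit inverse with the new crate::linalg::solve followed by a transpose. Because the covariance matrices are symmetric, (P_p^-1 * A * P_f)^T = P_f * A^T * P_p^-1, which is the usual Rauch-Tung-Striebel smoother gain, so the solve-then-transpose form avoids ever forming the inverse. A minimal standalone sketch of that identity, assuming ndarray-linalg's Inverse trait and a linked LAPACK backend (the extern crate line mirrors the examples):

extern crate intel_mkl_src; // only to link a LAPACK backend, as the crate's examples do

use ndarray::arr2;
use ndarray_linalg::Inverse;

fn main() {
    let a = arr2(&[[1.0, 0.3], [0.0, 1.0]]); // state transition A
    let p_f = arr2(&[[2.0, 0.5], [0.5, 1.0]]); // filtered covariance (symmetric)
    let p_p = arr2(&[[3.0, 0.7], [0.7, 2.0]]); // predicted covariance (symmetric)

    let p_p_inv = p_p.inv().expect("failed to inverse matrix");
    // What solve(p_p, a.dot(&p_f)) returns, then transposed -- as in the hunks above.
    let g_solve = p_p_inv.dot(&a.dot(&p_f)).t().to_owned();
    // The same gain written with an explicit inverse: P_f * A^T * P_p^-1.
    let g_inv = p_f.dot(&a.t()).dot(&p_p_inv);
    assert!((&g_solve - &g_inv).iter().all(|d| d.abs() < 1e-9));
    println!("smoother gain:\n{:?}", g_solve);
}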


@@ -5,6 +5,7 @@ mod expm;
 mod fitter;
 mod item;
 pub mod kernel;
+mod linalg;
 mod model;
 pub mod observation;
 mod storage;

src/linalg.rs (new file, 43 lines)

@@ -0,0 +1,43 @@
+use ndarray::{prelude::*, DataMut};
+
+fn as_slice_with_layout_mut<S, T, D>(a: &mut ArrayBase<S, D>) -> Option<(&mut [T], lapacke::Layout)>
+where
+    S: DataMut<Elem = T>,
+    D: Dimension,
+{
+    if a.as_slice_mut().is_some() {
+        Some((a.as_slice_mut().unwrap(), lapacke::Layout::RowMajor))
+    } else if a.as_slice_memory_order_mut().is_some() {
+        Some((
+            a.as_slice_memory_order_mut().unwrap(),
+            lapacke::Layout::ColumnMajor,
+        ))
+    } else {
+        None
+    }
+}
+
+pub fn solve(mut a: Array2<f64>, mut b: Array2<f64>) -> Array2<f64> {
+    assert!(a.is_square());
+    let n = a.ncols() as i32;
+    let nrhs = b.ncols() as i32;
+    let lda = n;
+    let ldb = nrhs;
+    let info;
+    let mut ipiv = vec![0; n as usize];
+    let (a_slice, layout) = as_slice_with_layout_mut(&mut a).expect("Matrix `a` not contiguous.");
+    let (b_slice, _) = as_slice_with_layout_mut(&mut b).expect("Matrix `b` not contiguous.");
+    unsafe {
+        info = lapacke::dgesv(layout, n, nrhs, a_slice, lda, &mut ipiv, b_slice, ldb);
+    }
+    if info != 0 {
+        panic!("info={}", info);
+    }
+    b
+}
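For reference (not part of the commit): LAPACK's dgesv solves a * x = b by factoring a in place and overwriting b with the solution, which is exactly what this solve returns. A small unit test of that contract could be sketched alongside the module as follows; the extern crate line only links a LAPACK backend for the test binary, mirroring the examples.

#[cfg(test)]
mod tests {
    extern crate intel_mkl_src; // link a LAPACK backend for the test binary

    use super::solve;
    use ndarray::arr2;

    #[test]
    fn solve_returns_x_such_that_a_dot_x_is_b() {
        // Small, well-conditioned 2x2 system with two right-hand sides.
        let a = arr2(&[[4.0, 1.0], [1.0, 3.0]]);
        let b = arr2(&[[1.0, 2.0], [0.5, -1.0]]);
        let x = solve(a.clone(), b.clone());
        // dgesv overwrites its copy of `b` with the solution, so a * x should recover b.
        let residual = &a.dot(&x) - &b;
        assert!(residual.iter().all(|r| r.abs() < 1e-9));
    }
}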


@@ -47,6 +47,13 @@ impl BinaryModel {
         );
     }
 
+    pub fn item_score(&self, name: &str, t: f64) -> (f64, f64) {
+        let id = self.storage.get_id(name);
+        let (ms, vs) = self.storage.item(id).fitter.predict(&[t]);
+        (ms[0], vs[0])
+    }
+
     pub fn observe(&mut self, winners: &[&str], losers: &[&str], t: f64) {
         if t < self.last_t {
             panic!("observations must be added in chronological order");