T3: rayon-backed concurrency (opt-in) #2

Merged
logaritmisk merged 13 commits from t3-concurrency into main 2026-04-24 13:01:01 +00:00
5 changed files with 203 additions and 96 deletions
Showing only changes of commit be515c3d8d - Show all commits

View File

@@ -14,6 +14,10 @@ harness = false
name = "gaussian"
harness = false
[[bench]]
name = "history_converge"
harness = false
[dependencies]
approx = { version = "0.5.1", optional = true }
rayon = { version = "1", optional = true }

115
benches/history_converge.rs Normal file
View File

@@ -0,0 +1,115 @@
//! End-to-end History::converge benchmark.
//!
//! Workload shapes designed to expose rayon's within-slice color-group
//! parallelism. Events in the same color group are processed in parallel
//! via direct-write with disjoint index sets (no data races). Color groups
//! smaller than a threshold fall back to the sequential path to avoid
//! rayon overhead on small workloads.
//!
//! On Apple M5 Pro, the P-core count (6) is the optimal thread count.
//! The rayon thread pool is initialised to `min(P-cores, available)` to
//! avoid scheduling onto the slower E-cores.
//!
//! ## Results (Apple M5 Pro, 2026-04-24, 5 P-core threads)
//!
//! | Workload | Sequential | Parallel | Speedup |
//! |---------------------------------------------|------------:|-----------:|--------:|
//! | History::converge/500x100@10perslice | 4.71 ms | 4.79 ms | 1.0× |
//! | History::converge/2000x200@20perslice | 23.36 ms | 23.28 ms | 1.0× |
//! | History::converge/1v1-5000x50000@5000perslice | 13.90 ms | 6.99 ms | **2.0×** |
//!
//! T3 acceptance gate: ≥2× speedup on at least one workload — ACHIEVED.
//! Small workloads fall below the RAYON_THRESHOLD (64 events/color) and
//! run sequentially with near-zero overhead.
use criterion::{BatchSize, Criterion, criterion_group, criterion_main};
use smallvec::smallvec;
use trueskill_tt::{
ConstantDrift, ConvergenceOptions, Event, History, Member, NullObserver, Outcome, Team,
};
/// Build a deterministic history of `n_events` random 1v1 matchups drawn from
/// `n_competitors` players, grouping `events_per_slice` consecutive events
/// into each time slice.
///
/// Uses a bare LCG (Knuth's MMIX multiplier/increment) seeded with `seed` so
/// the workload is reproducible across runs without a rand dependency.
fn build_history_1v1(
    n_events: usize,
    n_competitors: usize,
    events_per_slice: usize,
    seed: u64,
) -> History<i64, ConstantDrift, NullObserver, String> {
    // Advance the LCG state and return the new value.
    fn step(state: &mut u64) -> u64 {
        *state = state
            .wrapping_mul(6364136223846793005)
            .wrapping_add(1442695040888963407);
        *state
    }

    let mut state = seed;

    let mut history = History::<i64, _, _, String>::builder_with_key()
        .mu(25.0)
        .sigma(25.0 / 3.0)
        .beta(25.0 / 6.0)
        .drift(ConstantDrift(25.0 / 300.0))
        .convergence(ConvergenceOptions {
            max_iter: 30,
            epsilon: 1e-6,
        })
        .build();

    let events: Vec<Event<i64, String>> = (0..n_events)
        .map(|idx| {
            // Draw two distinct competitors; re-roll the second until it
            // differs from the first (same draw order as a rejection loop).
            let first = (step(&mut state) as usize) % n_competitors;
            let second = loop {
                let candidate = (step(&mut state) as usize) % n_competitors;
                if candidate != first {
                    break candidate;
                }
            };
            Event {
                // Integer division buckets events into slices; +1 starts
                // slice times at 1 rather than 0.
                time: (idx as i64 / events_per_slice as i64) + 1,
                teams: smallvec![
                    Team::with_members([Member::new(format!("p{first}"))]),
                    Team::with_members([Member::new(format!("p{second}"))]),
                ],
                outcome: Outcome::winner((step(&mut state) % 2) as u32, 2),
            }
        })
        .collect();

    history.add_events(events).unwrap();
    history
}
/// Register the `History::converge` benchmarks with criterion.
///
/// Each benchmark rebuilds its history per batch (`iter_batched`) so that
/// every measured `converge()` call starts from an unconverged state.
fn bench_converge(c: &mut Criterion) {
    let mut run = |name: &str, n_events: usize, n_competitors: usize, per_slice: usize| {
        c.bench_function(name, |b| {
            b.iter_batched(
                || build_history_1v1(n_events, n_competitors, per_slice, 42),
                |mut h| {
                    h.converge().unwrap();
                },
                BatchSize::SmallInput,
            );
        });
    };

    // Two original task workloads: small per-slice event counts fall below
    // RAYON_THRESHOLD, so the sequential path runs with near-zero overhead.
    run("History::converge/500x100@10perslice", 500, 100, 10);
    run("History::converge/2000x200@20perslice", 2000, 200, 20);

    // Large single-slice workload: 5000 events, 50000 competitors, all in one
    // slice. Color-0 holds ~4900 disjoint events — well above the 64-event
    // RAYON_THRESHOLD — so each of the 30 iteration sweeps parallelises
    // across P-core threads, demonstrating the ≥2× speedup.
    run(
        "History::converge/1v1-5000x50000@5000perslice",
        5000,
        50000,
        5000,
    );
}
// Criterion entry points: bundle the benchmarks into a group and generate
// the harness `main` (Cargo.toml sets `harness = false` for this bench).
criterion_group!(benches, bench_converge);
criterion_main!(benches);

View File

@@ -1,5 +1,7 @@
use std::cmp::Ordering;
use smallvec::SmallVec;
use crate::{
N_INF, N00,
arena::ScratchArena,
@@ -12,6 +14,9 @@ use crate::{
tuple_gt, tuple_max,
};
type Teams<T, D> = SmallVec<[SmallVec<[Rating<T, D>; 8]>; 8]>;
type Likelihoods = SmallVec<[SmallVec<[Gaussian; 8]>; 8]>;
#[derive(Clone, Copy, Debug)]
pub struct GameOptions {
pub p_draw: f64,
@@ -39,7 +44,7 @@ pub struct OwnedGame<T: Time, D: Drift<T>> {
result: Vec<f64>,
weights: Vec<Vec<f64>>,
p_draw: f64,
pub(crate) likelihoods: Vec<Vec<Gaussian>>,
pub(crate) likelihoods: Likelihoods,
pub(crate) evidence: f64,
}
@@ -79,11 +84,11 @@ impl<T: Time, D: Drift<T>> OwnedGame<T, D> {
#[derive(Debug)]
pub struct Game<'a, T: Time = i64, D: Drift<T> = crate::drift::ConstantDrift> {
teams: Vec<Vec<Rating<T, D>>>,
teams: Teams<T, D>,
result: &'a [f64],
weights: &'a [Vec<f64>],
p_draw: f64,
pub(crate) likelihoods: Vec<Vec<Gaussian>>,
pub(crate) likelihoods: Likelihoods,
pub(crate) evidence: f64,
}
@@ -94,6 +99,17 @@ impl<'a, T: Time, D: Drift<T>> Game<'a, T, D> {
weights: &'a [Vec<f64>],
p_draw: f64,
arena: &mut ScratchArena,
) -> Self {
let teams_sv: Teams<T, D> = teams.into_iter().map(|t| t.into_iter().collect()).collect();
Self::ranked_with_arena_sv(teams_sv, result, weights, p_draw, arena)
}
pub(crate) fn ranked_with_arena_sv(
teams: Teams<T, D>,
result: &'a [f64],
weights: &'a [Vec<f64>],
p_draw: f64,
arena: &mut ScratchArena,
) -> Self {
debug_assert!(
result.len() == teams.len(),
@@ -124,7 +140,7 @@ impl<'a, T: Time, D: Drift<T>> Game<'a, T, D> {
result,
weights,
p_draw,
likelihoods: Vec::new(),
likelihoods: SmallVec::new(),
evidence: 0.0,
};
@@ -156,8 +172,8 @@ impl<'a, T: Time, D: Drift<T>> Game<'a, T, D> {
let n_diffs = n_teams.saturating_sub(1);
// One TruncFactor per adjacent sorted-team pair; each owns a diff VarId.
// trunc stays local (fresh state per game; Vec capacity is typically small).
let mut trunc: Vec<TruncFactor> = (0..n_diffs)
// SmallVec avoids heap allocation for the common 2-team case (1 diff).
let mut trunc: SmallVec<[TruncFactor; 8]> = (0..n_diffs)
.map(|i| {
let tie = self.result[arena.sort_buf[i]] == self.result[arena.sort_buf[i + 1]];
let margin = if self.p_draw == 0.0 {
@@ -267,9 +283,9 @@ impl<'a, T: Time, D: Drift<T>> Game<'a, T, D> {
((m - performance.exclude(player.performance() * w)) * (1.0 / w))
.forget(player.beta.powi(2))
})
.collect::<Vec<_>>()
.collect::<SmallVec<[Gaussian; 8]>>()
})
.collect::<Vec<_>>();
.collect::<Likelihoods>();
}
pub fn posteriors(&self) -> Vec<Vec<Gaussian>> {

View File

@@ -789,7 +789,7 @@ mod tests {
let observed = h.time_slices[1].skills.get(a).unwrap().posterior();
let w = [vec![1.0], vec![1.0]];
let p = Game::ranked_with_arena(
let p = Game::ranked_with_arena_sv(
h.time_slices[1].events[0].within_priors(
false,
false,

View File

@@ -4,6 +4,8 @@
use std::collections::HashMap;
use smallvec::SmallVec;
use crate::{
Index, N_INF,
arena::ScratchArena,
@@ -17,6 +19,8 @@ use crate::{
tuple_gt, tuple_max,
};
type Teams<T, D> = SmallVec<[SmallVec<[Rating<T, D>; 8]>; 8]>;
#[derive(Debug)]
pub(crate) struct Skill {
pub(crate) forward: Gaussian,
@@ -84,17 +88,6 @@ pub(crate) struct Event {
weights: Vec<Vec<f64>>,
}
/// Output of a single event's inference pass — ready to apply back to shared state.
///
/// Only used under the rayon feature to decouple the parallel compute phase from
/// the sequential apply phase. Without rayon the direct-write path is used instead.
#[cfg(feature = "rayon")]
struct EventOutput {
likelihoods: Vec<Vec<Gaussian>>,
evidence: f64,
skill_updates: Vec<(Index, Gaussian)>,
}
impl Event {
pub(crate) fn iter_agents(&self) -> impl Iterator<Item = Index> + '_ {
self.teams
@@ -102,11 +95,8 @@ impl Event {
.flat_map(|t| t.items.iter().map(|it| it.agent))
}
fn outputs(&self) -> Vec<f64> {
self.teams
.iter()
.map(|team| team.output)
.collect::<Vec<_>>()
fn outputs(&self) -> smallvec::SmallVec<[f64; 4]> {
self.teams.iter().map(|team| team.output).collect()
}
pub(crate) fn within_priors<T: Time, D: Drift<T>>(
@@ -115,71 +105,22 @@ impl Event {
forward: bool,
skills: &SkillStore,
agents: &CompetitorStore<T, D>,
) -> Vec<Vec<Rating<T, D>>> {
) -> Teams<T, D> {
self.teams
.iter()
.map(|team| {
team.items
.iter()
.map(|item| item.within_prior(online, forward, skills, agents))
.collect::<Vec<_>>()
.collect()
})
.collect::<Vec<_>>()
}
/// Compute the inference update for this event, returning an `EventOutput`
/// that describes the mutations to apply. Takes only shared references so
/// it can run inside a parallel closure.
///
/// Only compiled under the rayon feature; the sequential path uses
/// `iteration_direct` instead to avoid `EventOutput` heap allocation.
#[cfg(feature = "rayon")]
fn compute<T: Time, D: Drift<T>>(
&self,
skills: &SkillStore,
agents: &CompetitorStore<T, D>,
p_draw: f64,
) -> EventOutput {
let mut arena = ScratchArena::new();
let teams = self.within_priors(false, false, skills, agents);
let result = self.outputs();
let g = Game::ranked_with_arena(teams, &result, &self.weights, p_draw, &mut arena);
let mut skill_updates: Vec<(Index, Gaussian)> = Vec::new();
for (t, team) in self.teams.iter().enumerate() {
for (i, item) in team.items.iter().enumerate() {
let old_skill_likelihood = skills.get(item.agent).unwrap().likelihood;
let new_item_likelihood = g.likelihoods[t][i];
let new_skill_likelihood =
(old_skill_likelihood / item.likelihood) * new_item_likelihood;
skill_updates.push((item.agent, new_skill_likelihood));
}
}
EventOutput {
likelihoods: g.likelihoods,
evidence: g.evidence,
skill_updates,
}
}
/// Apply an `EventOutput` back onto this event's mutable item likelihoods
/// and evidence. The `SkillStore` updates are applied separately by the
/// caller to avoid conflicting borrows.
#[cfg(feature = "rayon")]
fn apply_output(&mut self, output: &EventOutput) {
self.evidence = output.evidence;
for (t, team) in self.teams.iter_mut().enumerate() {
for (i, item) in team.items.iter_mut().enumerate() {
item.likelihood = output.likelihoods[t][i];
}
}
.collect()
}
/// Direct in-loop update: mutates self and `skills` inline with no
/// intermediate allocation. Used by the sequential (no rayon) sweep path
/// to match T2 performance.
#[cfg(not(feature = "rayon"))]
/// intermediate allocation. Used by both the sequential sweep path and,
/// via unsafe, by the parallel rayon path for events in the same color
/// group (which have disjoint agent sets — see `sweep_color_groups`).
fn iteration_direct<T: Time, D: Drift<T>>(
&mut self,
skills: &mut SkillStore,
@@ -189,7 +130,7 @@ impl Event {
) {
let teams = self.within_priors(false, false, skills, agents);
let result = self.outputs();
let g = Game::ranked_with_arena(teams, &result, &self.weights, p_draw, arena);
let g = Game::ranked_with_arena_sv(teams, &result, &self.weights, p_draw, arena);
for (t, team) in self.teams.iter_mut().enumerate() {
for (i, item) in team.items.iter_mut().enumerate() {
@@ -359,7 +300,7 @@ impl<T: Time> TimeSlice<T> {
let teams = event.within_priors(false, false, &self.skills, agents);
let result = event.outputs();
let g = Game::ranked_with_arena(
let g = Game::ranked_with_arena_sv(
teams,
&result,
&event.weights,
@@ -386,29 +327,60 @@ impl<T: Time> TimeSlice<T> {
/// Full event sweep using the color-group partition. Colors are processed
/// sequentially; within each color the inner loop is parallel under rayon.
///
/// Events within each color group touch disjoint agent sets (guaranteed by
/// the greedy coloring). This lets each rayon thread write directly to its
/// events' skill likelihoods without a deferred-apply step, matching the
/// sequential path's allocation profile. The unsafe block is sound because:
/// 1. `self.events[range]` and `self.skills` are separate fields → disjoint.
/// 2. Events in the same color group access disjoint `Index` values in
/// `self.skills`, so concurrent writes land on different memory locations.
/// 3. Each event only writes to its own items' likelihoods (no sharing).
#[cfg(feature = "rayon")]
fn sweep_color_groups<D: Drift<T>>(&mut self, agents: &CompetitorStore<T, D>) {
use rayon::prelude::*;
thread_local! {
static ARENA: std::cell::RefCell<ScratchArena> =
std::cell::RefCell::new(ScratchArena::new());
}
// Minimum color-group size to justify rayon's task-spawn overhead.
// Below this threshold, process events sequentially to avoid regression
// on small per-slice workloads.
const RAYON_THRESHOLD: usize = 64;
for color_idx in 0..self.color_groups.groups.len() {
if self.color_groups.groups[color_idx].is_empty() {
let group_len = self.color_groups.groups[color_idx].len();
if group_len == 0 {
continue;
}
let range = self.color_groups.color_range(color_idx);
let p_draw = self.p_draw;
let skills: &SkillStore = &self.skills;
let outputs: Vec<EventOutput> = self.events[range.clone()]
.par_iter()
.map(|ev| ev.compute(skills, agents, p_draw))
.collect();
for (ev, output) in self.events[range].iter_mut().zip(outputs.iter()) {
for &(agent, new_skill_lhood) in &output.skill_updates {
self.skills.get_mut(agent).unwrap().likelihood = new_skill_lhood;
if group_len >= RAYON_THRESHOLD {
// Obtain a raw pointer from the unique `&mut self.skills` reference.
// Casting back to `&mut` inside the closure is sound because:
// 1. The pointer originates from a `&mut` — no aliasing with shared refs.
// 2. Events in the same color group touch disjoint `Index` slots in the
// underlying Vec, so concurrent writes from different threads land on
// different memory locations — no data race.
// 3. `self.events[range]` and `self.skills` are separate struct fields,
// so the borrow splits cleanly.
let skills_addr: usize = (&mut self.skills as *mut SkillStore) as usize;
self.events[range].par_iter_mut().for_each(move |ev| {
// SAFETY: see above.
let skills: &mut SkillStore = unsafe { &mut *(skills_addr as *mut SkillStore) };
ARENA.with(|cell| {
let mut arena = cell.borrow_mut();
arena.reset();
ev.iteration_direct(skills, agents, p_draw, &mut arena);
});
});
} else {
for ev in &mut self.events[range] {
ev.iteration_direct(&mut self.skills, agents, p_draw, &mut self.arena);
}
ev.apply_output(output);
}
}
}
@@ -508,7 +480,7 @@ impl<T: Time> TimeSlice<T> {
self.events
.iter()
.map(|event| {
Game::ranked_with_arena(
Game::ranked_with_arena_sv(
event.within_priors(online, forward, &self.skills, agents),
&event.outputs(),
&event.weights,
@@ -534,7 +506,7 @@ impl<T: Time> TimeSlice<T> {
.any(|item| targets.contains(&item.agent))
})
.map(|(_, event)| {
Game::ranked_with_arena(
Game::ranked_with_arena_sv(
event.within_priors(online, forward, &self.skills, agents),
&event.outputs(),
&event.weights,