Diffstat (limited to 'src/strategy/mcts.rs')
-rw-r--r--  src/strategy/mcts.rs  230
1 file changed, 0 insertions(+), 230 deletions(-)
diff --git a/src/strategy/mcts.rs b/src/strategy/mcts.rs
deleted file mode 100644
index c122fa1..0000000
--- a/src/strategy/mcts.rs
+++ /dev/null
@@ -1,230 +0,0 @@
-use crate::command::{Action, Command};
-use crate::game::{GameBoard, SimulationOutcome};
-
-use std::cmp;
-use std::collections::HashMap;
-use std::ops::{AddAssign, Div};
-use time::{Duration, PreciseTime};
-
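-/// Runs MCTS from `state` until `max_time` has elapsed, reusing the matching
-/// subtree from `previous_root` when one exists, and returns the best player
-/// command together with the pruned root for reuse on the next turn.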
-pub fn choose_move(
- state: &GameBoard,
- previous_root: Option<Node>,
- start_time: PreciseTime,
- max_time: Duration,
-) -> (Command, Node) {
- let mut root_node = match previous_root {
- None => Node {
- state: state.clone(),
- score_sum: ScoreSum::new(),
- player_score_sums: [HashMap::new(), HashMap::new()],
- unexplored: mcts_move_combo(state),
- children: HashMap::new(),
- },
- Some(mut node) => node
- .children
- .drain()
- .map(|(_k, n)| n)
- .find(|n| n.state == *state)
- .unwrap_or_else(|| {
- eprintln!("Previous round did not appear in the cache");
- Node {
- state: state.clone(),
- score_sum: ScoreSum::new(),
- player_score_sums: [HashMap::new(), HashMap::new()],
- unexplored: mcts_move_combo(state),
- children: HashMap::new(),
- }
- }),
- };
-
- while start_time.to(PreciseTime::now()) < max_time {
- let _ = mcts(&mut root_node);
- }
-
- eprintln!("Number of simulations: {}", root_node.score_sum.visit_count);
- for (command, score_sum) in &root_node.player_score_sums[0] {
- eprintln!(
- "{} = {} ({} visits)",
- command,
- score_sum.avg().val,
- score_sum.visit_count
- );
- }
-
- let chosen_command = best_player_move(&root_node);
-
- root_node
- .children
- .retain(|[c1, _], _| *c1 == chosen_command);
-
- (chosen_command, root_node)
-}
-
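-/// A node in the search tree: the simulated game state, aggregate visit
-/// statistics, per-player per-command score sums, joint moves not yet tried,
-/// and explored children keyed by the joint move that produced them.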
-pub struct Node {
- state: GameBoard,
- score_sum: ScoreSum,
- player_score_sums: [HashMap<Command, ScoreSum>; 2],
- unexplored: Vec<[Command; 2]>,
- children: HashMap<[Command; 2], Node>,
-}
-
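-/// Thin wrapper around `f32` so scores can be summed, averaged, and given a
-/// total ordering for use with `max_by_key`.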
-#[derive(Clone, Copy, Debug, PartialEq, PartialOrd)]
-struct Score {
- val: f32,
-}
-
-impl AddAssign for Score {
- fn add_assign(&mut self, other: Self) {
- self.val += other.val;
- }
-}
-
-impl Div<u32> for Score {
- type Output = Self;
- fn div(self, other: u32) -> Self {
- Score {
- val: self.val / other as f32,
- }
- }
-}
-
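-// Total ordering that treats incomparable values (NaN) as equal.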
-impl cmp::Eq for Score {}
-impl cmp::Ord for Score {
- fn cmp(&self, other: &Score) -> cmp::Ordering {
- self.val
- .partial_cmp(&other.val)
- .unwrap_or(cmp::Ordering::Equal)
- }
-}
-
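-/// Running score total plus visit count, used to compute average scores.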
-struct ScoreSum {
- sum: Score,
- visit_count: u32,
-}
-
-impl ScoreSum {
- fn new() -> ScoreSum {
- ScoreSum {
- sum: Score { val: 0. },
- visit_count: 0,
- }
- }
- fn with_initial(score: Score) -> ScoreSum {
- ScoreSum {
- sum: score,
- visit_count: 1,
- }
- }
- fn avg(&self) -> Score {
- self.sum / self.visit_count
- }
-}
-
-impl AddAssign<Score> for ScoreSum {
- fn add_assign(&mut self, other: Score) {
- self.sum += other;
- self.visit_count = self.visit_count.saturating_add(1);
- }
-}
-
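-/// One MCTS iteration: terminal states are scored directly, an untried joint
-/// move is expanded and evaluated, otherwise selection recurses into the
-/// child chosen by UCT; the result is backpropagated via `update`.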
-fn mcts(node: &mut Node) -> Score {
- if node.state.outcome != SimulationOutcome::Continue {
- score(&node.state)
- } else if let Some(commands) = node.unexplored.pop() {
- let mut new_state = node.state.clone();
- new_state.simulate(commands);
- let score = rollout(&new_state);
- let unexplored = if new_state.outcome == SimulationOutcome::Continue {
- mcts_move_combo(&new_state)
- } else {
- Vec::new()
- };
-
- let new_node = Node {
- state: new_state,
- score_sum: ScoreSum::with_initial(score),
- player_score_sums: [HashMap::new(), HashMap::new()],
- unexplored,
- children: HashMap::new(),
- };
- node.children.insert(commands, new_node);
-
- update(node, commands, score);
- score
- } else {
- let commands = choose_existing(node);
- let score = mcts(
- node.children
- .get_mut(&commands)
- .expect("Child node for the selected commands should already exist"),
- );
- update(node, commands, score);
- score
- }
-}
-
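-/// Cartesian product of both players' valid moves, since moves are simultaneous.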
-fn mcts_move_combo(state: &GameBoard) -> Vec<[Command; 2]> {
- let player_moves = state.valid_moves(0);
- let opponent_moves = state.valid_moves(1);
- debug_assert!(!player_moves.is_empty(), "No player moves");
- debug_assert!(!opponent_moves.is_empty(), "No opponent moves");
-
- let mut result = Vec::with_capacity(player_moves.len() * opponent_moves.len());
- for p in &player_moves {
- for o in &opponent_moves {
- result.push([*p, *o]);
- }
- }
-
- result
-}
-
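-/// Picks the player command with the highest average score, falling back to
-/// `DoNothing` if no statistics have been recorded.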
-fn best_player_move(node: &Node) -> Command {
- node.player_score_sums[0]
- .iter()
- .max_by_key(|(_command, score_sum)| score_sum.avg())
- .map(|(command, _score_sum)| *command)
- .unwrap_or_else(|| Command::new(Action::DoNothing))
-}
-
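-/// Static evaluation: a large fixed score for a decided game, otherwise the
-/// difference between the two players' scores.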
-fn score(state: &GameBoard) -> Score {
- Score {
- val: match state.outcome {
- SimulationOutcome::PlayerWon(0) => 3000.,
- SimulationOutcome::PlayerWon(1) => -3000.,
- _ => (state.players[0].score() - state.players[1].score()) as f32,
- },
- }
-}
-
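-/// Currently a direct static evaluation of the reached state rather than a
-/// random playout to the end of the game.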
-fn rollout(state: &GameBoard) -> Score {
- score(state)
-}
-
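-/// Selects a joint move by choosing each player's command independently
-/// (decoupled UCT).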
-fn choose_existing(node: &Node) -> [Command; 2] {
- [choose_one_existing(node, 0), choose_one_existing(node, 1)]
-}
-
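-/// UCB1 over one player's per-command statistics: average score (negated for
-/// the minimizing player) plus an exploration bonus with constant `c`.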
-fn choose_one_existing(node: &Node, player_index: usize) -> Command {
- let ln_n = (node.score_sum.visit_count as f32).ln();
- let c = 100.;
- let multiplier = if player_index == 0 { 1. } else { -1. };
- node.player_score_sums[player_index]
- .iter()
- .max_by_key(|(_command, score_sum)| Score {
- // Negate only the exploitation term for the minimizing player; the
- // exploration bonus stays positive for both players. Comparing with
- // `Score` avoids truncating the UCT value to an integer.
- val: multiplier * score_sum.avg().val
- + c * (ln_n / score_sum.visit_count as f32).sqrt(),
- })
- .map(|(command, _score_sum)| *command)
- .unwrap_or_else(|| Command::new(Action::DoNothing))
-}
-
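-/// Backpropagation: adds the simulation score to both players' per-command
-/// sums and to the node's overall total.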
-fn update(node: &mut Node, commands: [Command; 2], score: Score) {
- *node.player_score_sums[0]
- .entry(commands[0])
- .or_insert_with(ScoreSum::new) += score;
- *node.player_score_sums[1]
- .entry(commands[1])
- .or_insert_with(ScoreSum::new) += score;
- node.score_sum += score;
-}
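
The intended call pattern, as suggested by choose_move's signature and its cache
lookup, is to feed the returned Node back in as previous_root on the next turn.
A minimal sketch of such a driver loop follows; read_state, send_command, and
the 900 ms budget are hypothetical placeholders, not part of this file:

    let mut previous_root: Option<Node> = None;
    loop {
        let state: GameBoard = read_state();        // hypothetical: state supplied by the game engine
        let start_time = PreciseTime::now();
        let max_time = Duration::milliseconds(900); // hypothetical per-turn time budget
        let (command, root) = choose_move(&state, previous_root, start_time, max_time);
        send_command(command);                      // hypothetical: report the chosen move
        previous_root = Some(root);                 // reuse the pruned subtree next turn
    }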