Additional visualization for round 4

vandomej 2025-09-05 09:24:53 -07:00
parent 4efba94ff4
commit 7156d6d733
11 changed files with 721 additions and 129 deletions

.gitignore vendored

@@ -14,3 +14,7 @@ settings.json
.DS_Store
.vscode/alive
# Added by cargo
/target


@@ -5,10 +5,10 @@ from collections import defaultdict
import numpy as np
# Simplified JSON data for demonstration
with open('gemla/round2.json', 'r') as file:
with open('gemla/round4.json', 'r') as file:
simplified_json_data = json.load(file)
target_node_id = '0c1e64dc-6ddf-4dbb-bf6e-e8218b925194'
target_node_id = '523f8250-3101-4586-90a1-127ffa6d73d9'
# Function to traverse the tree to find a node id
def traverse_left_nodes(node):
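The body of traverse_left_nodes is elided by this hunk. For orientation, here is a hedged sketch of what it presumably does, mirroring the traverse_right_nodes helper added in parameter_analysis.py below but walking the left spine, which this script treats as the main line (the name and base cases are assumptions, not the committed code):

def traverse_left_nodes_sketch(node):
    # Collect the chain of left children in order, root first
    if node is None:
        return []
    left_node = node.get("left")
    if left_node is None:
        return []
    return [left_node] + traverse_left_nodes_sketch(left_node)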
@@ -67,7 +67,7 @@ fig, ax = plt.subplots(figsize=(10, 6))
boxplots = ax.boxplot(scores_values, vert=False, patch_artist=True, labels=[f'Set {i+1}' for i in range(len(scores_values))])
# Set figure name to node id
# fig.canvas.set_window_title('Main node line')
ax.set_xscale('symlog', linthresh=1.0)
# Labeling
ax.set_xlabel(f'Scores - Main Line')
@@ -79,33 +79,35 @@ ax.set_yticklabels([f'Set {i+1}' for i in range(len(scores_values))])
# Getting most recent right graph
right_nodes = traverse_right_nodes(simplified_json_data[0])
target_node_id = None
target_node = None
if target_node_id:
if len(right_nodes) != 0:
target_node_id = None
target_node = None
if target_node_id:
for node in right_nodes:
if node["val"]["id"] == target_node_id:
target_node = node
break
else:
target_node = right_nodes[1]
scores = target_node["val"]["node"]["scores"]
else:
target_node = right_nodes[0]
scores = target_node["val"]["node"]["scores"]
scores_values = [list(score_set.values()) for score_set in scores]
scores_values = [list(score_set.values()) for score_set in scores]
# Set up the figure for plotting on the same graph
fig, ax = plt.subplots(figsize=(10, 6))
# Set up the figure for plotting on the same graph
fig, ax = plt.subplots(figsize=(10, 6))
# Generate a boxplot for each set of scores on the same graph
boxplots = ax.boxplot(scores_values, vert=False, patch_artist=True, labels=[f'Set {i+1}' for i in range(len(scores_values))])
# Generate a boxplot for each set of scores on the same graph
boxplots = ax.boxplot(scores_values, vert=False, patch_artist=True, labels=[f'Set {i+1}' for i in range(len(scores_values))])
ax.set_xscale('symlog', linthresh=1.0)
# Labeling
ax.set_xlabel(f"Scores: {target_node['val']['id']}")
ax.set_ylabel('Score Sets')
ax.yaxis.grid(True) # Add horizontal grid lines for clarity
# Labeling
ax.set_xlabel(f"Scores: {target_node['val']['id']}")
ax.set_ylabel('Score Sets')
ax.yaxis.grid(True) # Add horizontal grid lines for clarity
# Set y-axis labels to be visible
ax.set_yticklabels([f'Set {i+1}' for i in range(len(scores_values))])
# Set y-axis labels to be visible
ax.set_yticklabels([f'Set {i+1}' for i in range(len(scores_values))])
# Find the highest scoring sets combining all scores and generations
scores = []
@@ -121,7 +123,8 @@ for node in left_nodes:
scores.append(translated_node_scores)
# Add scores from the right nodes
for node in right_nodes:
if len(right_nodes) != 0:
for node in right_nodes:
if node["val"]["node"]:
node_scores = node["val"]["node"]["scores"]
translated_node_scores = []
@@ -155,9 +158,13 @@ labels = [f'{id[:8]}... Gen {gen}' for id, gen in top_20_individual_generations]
# Generate box and whisker plots for the top 20 individual generations
fig, ax = plt.subplots(figsize=(12, 10))
ax.boxplot(top_20_scores, vert=False, patch_artist=True, labels=labels)
ax.set_xscale('symlog', linthresh=1.0)
ax.set_xlabel('Scores')
ax.set_ylabel('Individual Generation')
ax.set_title('Top 20 Individual Generations by Q3 Value')
ax.yaxis.grid(True) # Add horizontal grid lines for clarity
# Display the plot
plt.show()
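The ax.set_xscale('symlog', linthresh=1.0) calls introduced in this file keep plots with large score outliers readable: the axis is linear within the +/-linthresh band around zero and logarithmic beyond it. A minimal, self-contained illustration of the effect (the data is made up):

import matplotlib.pyplot as plt
import numpy as np

fig, ax = plt.subplots()
x = np.linspace(-1000, 1000, 2001)
ax.plot(x, x)
ax.set_xscale('symlog', linthresh=1.0)  # linear inside [-1, 1], logarithmic outside
plt.show()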


@@ -0,0 +1,9 @@
[package]
name = "extract_fann_data"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
fann = "0.1.8"


@@ -0,0 +1,11 @@
fn main() {
// Replace this with the path to the directory containing `fann.lib`
let lib_dir = "F://vandomej/Downloads/vcpkg/packages/fann_x64-windows/lib";
println!("cargo:rustc-link-search=native={}", lib_dir);
println!("cargo:rustc-link-lib=static=fann");
// Use `dylib=fann` instead of `static=fann` if you're linking dynamically
// If there are any additional directories where the compiler can find header files, you can specify them like this:
// println!("cargo:include={}", path_to_include_directory);
}


@@ -0,0 +1,38 @@
extern crate fann;
use fann::Fann;
use std::os::raw::c_uint;
fn main() {
let args: Vec<String> = std::env::args().collect();
if args.len() < 2 {
eprintln!("Usage: {} <network_file>", args[0]);
std::process::exit(1);
}
let network_file = &args[1];
match Fann::from_file(network_file) {
Ok(ann) => {
// Output layer sizes
let layer_sizes = ann.get_layer_sizes();
let bias_counts = ann.get_bias_counts();
println!("Layers:");
for (layer_size, bias_count) in layer_sizes.iter().zip(bias_counts.iter()) {
println!("{} {}", layer_size, bias_count);
}
// Output connections
println!("Connections:");
let connections = ann.get_connections();
for connection in connections {
println!("{} {} {}", connection.from_neuron, connection.to_neuron, connection.weight);
}
},
Err(err) => {
eprintln!("Error loading network from file {}: {}", network_file, err);
std::process::exit(1);
}
}
}


@@ -22,29 +22,39 @@ use std::{
ops::Range,
path::{Path, PathBuf},
};
use tokio::{process::Command, sync::mpsc::channel};
use tokio::process::Command;
use uuid::Uuid;
use self::neural_network_utility::{crossbreed, major_mutation};
const BASE_DIR: &str = "F:\\\\vandomej\\Projects\\dootcamp-AI-Simulation\\Simulations";
const POPULATION: usize = 50;
const POPULATION: usize = 200;
const NEURAL_NETWORK_INPUTS: usize = 18;
const NEURAL_NETWORK_INPUTS: usize = 22;
const NEURAL_NETWORK_OUTPUTS: usize = 8;
const NEURAL_NETWORK_HIDDEN_LAYERS_MIN: usize = 1;
const NEURAL_NETWORK_HIDDEN_LAYERS_MAX: usize = 10;
const NEURAL_NETWORK_HIDDEN_LAYERS_MAX: usize = 2;
const NEURAL_NETWORK_HIDDEN_LAYER_SIZE_MIN: usize = 3;
const NEURAL_NETWORK_HIDDEN_LAYER_SIZE_MAX: usize = 35;
const NEURAL_NETWORK_INITIAL_WEIGHT_MIN: f32 = -2.0;
const NEURAL_NETWORK_INITIAL_WEIGHT_MAX: f32 = 2.0;
const NEURAL_NETWORK_HIDDEN_LAYER_SIZE_MAX: usize = 50;
const NEURAL_NETWORK_INITIAL_WEIGHT_MAX: f32 = 0.5;
const NEURAL_NETWORK_MINOR_MUTATION_RATE_MAX: f32 = 0.3;
const NEURAL_NETWORK_MUTATION_WEIGHT_MAX: f32 = 1.0;
const NEURAL_NETWORK_MAJOR_MUTATION_RATE_MAX: f32 = 1.0;
const NEURAL_NETWORK_CROSSBREED_SEGMENTS_MIN: usize = 2;
const NEURAL_NETWORK_CROSSBREED_SEGMENTS_MAX: usize = 20;
const OFFSHOOT_GENERATIONAL_LENIENCE: u64 = 5;
const NEURAL_NETWORK_CROSSBREED_SEGMENTS_MAX: usize = 6;
const OFFSHOOT_GENERATIONAL_LENIENCE: u64 = 10;
const MAINLINE_GENERATIONAL_LENIENCE: u64 = 20;
const SIMULATION_ROUNDS: usize = 5;
const SURVIVAL_RATE: f32 = 0.5;
const SURVIVAL_RATE_MIN: f32 = 0.1;
const SURVIVAL_RATE_MAX: f32 = 0.9;
const GAME_EXECUTABLE_PATH: &str =
"F:\\\\vandomej\\Projects\\dootcamp-AI-Simulation\\Package\\Windows\\AI_Fight_Sim.exe";
@@ -77,6 +87,7 @@ pub struct FighterNN {
pub id_mapping: Vec<HashMap<u64, u64>>,
pub lerp_amount: f32,
pub generational_lenience: u64,
pub survival_rate: f32,
}
#[async_trait]
@@ -102,9 +113,8 @@ impl GeneticNode for FighterNN {
})?;
let mut nn_shapes = HashMap::new();
let weight_initialization_range = thread_rng()
.gen_range(NEURAL_NETWORK_INITIAL_WEIGHT_MIN..0.0)
..thread_rng().gen_range(0.0..=NEURAL_NETWORK_INITIAL_WEIGHT_MAX);
let weight_initialization_amplitude = thread_rng().gen_range(0.0..NEURAL_NETWORK_INITIAL_WEIGHT_MAX);
let weight_initialization_range = -weight_initialization_amplitude..weight_initialization_amplitude;
// Create the first generation in this folder
for i in 0..POPULATION {
@@ -115,11 +125,11 @@ impl GeneticNode for FighterNN {
// Randomly generate a neural network shape based on constants
let hidden_layers = thread_rng()
.gen_range(NEURAL_NETWORK_HIDDEN_LAYERS_MIN..NEURAL_NETWORK_HIDDEN_LAYERS_MAX);
.gen_range(NEURAL_NETWORK_HIDDEN_LAYERS_MIN..=NEURAL_NETWORK_HIDDEN_LAYERS_MAX);
let mut nn_shape = vec![NEURAL_NETWORK_INPUTS as u32];
for _ in 0..hidden_layers {
nn_shape.push(thread_rng().gen_range(
NEURAL_NETWORK_HIDDEN_LAYER_SIZE_MIN..NEURAL_NETWORK_HIDDEN_LAYER_SIZE_MAX,
NEURAL_NETWORK_HIDDEN_LAYER_SIZE_MIN..=NEURAL_NETWORK_HIDDEN_LAYER_SIZE_MAX,
) as u32);
}
nn_shape.push(NEURAL_NETWORK_OUTPUTS as u32);
@@ -138,30 +148,31 @@ impl GeneticNode for FighterNN {
}
let mut crossbreed_segments = thread_rng().gen_range(
NEURAL_NETWORK_CROSSBREED_SEGMENTS_MIN..NEURAL_NETWORK_CROSSBREED_SEGMENTS_MAX,
NEURAL_NETWORK_CROSSBREED_SEGMENTS_MIN..=NEURAL_NETWORK_CROSSBREED_SEGMENTS_MAX,
);
if crossbreed_segments % 2 == 0 {
crossbreed_segments += 1;
}
let mutation_weight_amplitude = thread_rng().gen_range(0.0..1.0);
let mutation_weight_amplitude = thread_rng().gen_range(0.0..NEURAL_NETWORK_MUTATION_WEIGHT_MAX);
Ok(Box::new(FighterNN {
id: context.id,
folder,
population_size: POPULATION,
generation: 0,
scores: vec![HashMap::new()],
scores: vec![],
nn_shapes: vec![nn_shapes],
// we need crossbreed segments to be odd
crossbreed_segments,
weight_initialization_range,
minor_mutation_rate: thread_rng().gen_range(0.0..1.0),
major_mutation_rate: thread_rng().gen_range(0.0..1.0),
minor_mutation_rate: thread_rng().gen_range(0.0..NEURAL_NETWORK_MINOR_MUTATION_RATE_MAX),
major_mutation_rate: thread_rng().gen_range(0.0..NEURAL_NETWORK_MAJOR_MUTATION_RATE_MAX),
mutation_weight_range: -mutation_weight_amplitude..mutation_weight_amplitude,
id_mapping: vec![HashMap::new()],
id_mapping: vec![],
lerp_amount: 0.0,
generational_lenience: OFFSHOOT_GENERATIONAL_LENIENCE,
survival_rate: thread_rng().gen_range(SURVIVAL_RATE_MIN..SURVIVAL_RATE_MAX),
}))
}
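The parity bump above (incrementing an even crossbreed_segments) makes sense if crossbreeding alternates segments between the two parents: with an odd segment count the child starts and ends with the primary parent's genes. The real logic lives in neural_network_utility::crossbreed and is not shown in this diff; the following is only a toy Python illustration of segment alternation, with all names hypothetical:

def alternate_segments(primary, secondary, segments):
    # Split both parents into `segments` chunks and alternate the source,
    # starting with the primary parent.
    n = len(primary)
    bounds = [round(i * n / segments) for i in range(segments + 1)]
    child = []
    for seg in range(segments):
        source = primary if seg % 2 == 0 else secondary
        child.extend(source[bounds[seg]:bounds[seg + 1]])
    return child

print(alternate_segments(list("AAAAAA"), list("BBBBBB"), 3))  # ['A', 'A', 'B', 'B', 'A', 'A']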
@@ -189,12 +200,14 @@ impl GeneticNode for FighterNN {
i
};
let secondary_id = loop {
if allotted_simulations.is_empty() {
if allotted_simulations.is_empty() || allotted_simulations.len() == 1 {
// Select a random id
let random_id = loop {
let id = thread_rng().gen_range(0..self.population_size);
if id != primary_id {
allotted_simulations.clear();
break id;
}
};
@@ -219,19 +232,18 @@ impl GeneticNode for FighterNN {
matches.push((primary_id, secondary_id));
}
debug!("Matches determined");
trace!("Matches: {:?}", matches);
// Create a channel to send the scores back to the main thread
let (tx, mut rx) = channel::<(usize, f32)>(self.population_size * SIMULATION_ROUNDS * 20);
let mut tasks = Vec::new();
for (primary_id, secondary_id) in matches.iter() {
let task = {
let self_clone = self.clone();
let semaphore_clone = context.gemla_context.shared_semaphore.clone();
let display_simulation_semaphore = context.gemla_context.visible_simulations.clone();
let tx = tx.clone();
let task = async move {
let folder = self_clone.folder.clone();
let generation = self_clone.generation;
@@ -245,9 +257,9 @@ impl GeneticNode for FighterNN {
.join(self_clone.get_individual_id(*secondary_id as u64))
.with_extension("net");
let permit = semaphore_clone
.acquire_owned()
.await
// Introducing a new scope for acquiring permits and running simulations
let simulation_result = async move {
let permit = semaphore_clone.acquire_owned().await
.with_context(|| "Failed to acquire semaphore permit")?;
let display_simulation = match display_simulation_semaphore.try_acquire_owned() {
@@ -255,66 +267,73 @@ impl GeneticNode for FighterNN {
Err(_) => None,
};
let (primary_score, secondary_score) =
if let Some(display_simulation) = display_simulation {
let (primary_score, secondary_score) = if let Some(display_simulation) = display_simulation {
let result = run_1v1_simulation(&primary_nn, &secondary_nn, true).await?;
drop(display_simulation);
drop(display_simulation); // Explicitly dropping resources no longer needed
result
} else {
run_1v1_simulation(&primary_nn, &secondary_nn, false).await?
};
drop(permit);
drop(permit); // Explicitly dropping resources no longer needed
debug!(
"{} vs {} -> {} vs {}",
primary_id, secondary_id, primary_score, secondary_score
);
// Send score using a channel
tx.send((*primary_id, primary_score))
.await
.with_context(|| "Failed to send score")?;
tx.send((*secondary_id, secondary_score))
.await
.with_context(|| "Failed to send score")?;
Ok((*primary_id, primary_score, *secondary_id, secondary_score))
}.await; // Await the scoped async block immediately
Ok(())
// The result of the simulation, whether Ok or Err, is returned here,
// so any held permits are released when the block exits, regardless of success or failure.
simulation_result
};
tasks.push(task);
}
let results: Vec<Result<(), Error>> = join_all(tasks).await;
debug!("Tasks created");
let results: Vec<Result<(usize, f32, usize, f32), Error>> = join_all(tasks).await;
debug!("Tasks completed");
// resolve results for any errors
let mut scores = HashMap::new();
for result in results.into_iter() {
result.with_context(|| "Failed to run simulation")?;
let (primary_id, primary_score, secondary_id, secondary_score) = result.with_context(|| "Failed to run simulation")?;
// If score exists, add the new score to the existing score
if let Some((existing_score, count)) = scores.get_mut(&(primary_id as u64)) {
*existing_score += primary_score;
*count += 1;
} else {
scores.insert(primary_id as u64, (primary_score, 1));
}
// Receive the scores from the channel
let mut scores = HashMap::new();
while let Some((id, score)) = rx.recv().await {
// If score exists, add the new score to the existing score
if let Some(existing_score) = scores.get_mut(&(id as u64)) {
*existing_score += score;
if let Some((existing_score, count)) = scores.get_mut(&(secondary_id as u64)) {
*existing_score += secondary_score;
*count += 1;
} else {
scores.insert(id as u64, score);
scores.insert(secondary_id as u64, (secondary_score, 1));
}
}
// Average scores for each individual
for (_, score) in scores.iter_mut() {
*score /= SIMULATION_ROUNDS as f32;
let mut final_scores = HashMap::new();
for (i, (score, count)) in scores.iter() {
final_scores.insert(*i, *score / *count as f32);
}
self.scores.push(scores);
self.scores.push(final_scores);
Ok(should_continue(&self.scores, self.generational_lenience)?)
}
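With the mpsc channel removed, each task now returns both fighters' scores directly and simulate folds them into (sum, count) pairs before averaging, so an individual is averaged over its actual match count rather than a fixed SIMULATION_ROUNDS. A hedged Python restatement of that aggregation (function and variable names are illustrative):

from collections import defaultdict

def average_scores(match_results):
    # match_results: iterable of (primary_id, primary_score, secondary_id, secondary_score)
    totals = defaultdict(lambda: [0.0, 0])  # id -> [score sum, match count]
    for pid, p_score, sid, s_score in match_results:
        for ident, score in ((pid, p_score), (sid, s_score)):
            totals[ident][0] += score
            totals[ident][1] += 1
    return {ident: total / count for ident, (total, count) in totals.items()}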
async fn mutate(&mut self, _context: GeneticNodeContext<Self::Context>) -> Result<(), Error> {
let survivor_count = (self.population_size as f32 * SURVIVAL_RATE) as usize;
let survivor_count = (self.population_size as f32 * self.survival_rate) as usize;
let mut nn_sizes = Vec::new();
let mut id_mapping = HashMap::new();
@@ -359,7 +378,7 @@ impl GeneticNode for FighterNN {
let mut tasks = Vec::new();
// Take the remaining nn's and create new nn's by the following:
for i in 0..survivor_count {
for i in 0..(self.population_size - survivor_count) {
let self_clone = self.clone();
// randomly select individual id's sorted scores proportional to their score
@@ -443,7 +462,6 @@ impl GeneticNode for FighterNN {
.collect::<HashMap<_, _>>();
self.generation += 1;
self.scores.push(HashMap::new());
self.nn_shapes.push(nn_sizes_map);
self.id_mapping.push(id_mapping);
@@ -524,6 +542,8 @@ impl GeneticNode for FighterNN {
run_1v1_simulation(&left_nn_path, &right_nn_path, false).await?
};
debug!("{} vs {} -> {} vs {}", left_nn_id, right_nn_id, left_score, right_score);
drop(permit);
Ok::<(f32, f32), Error>((left_score, right_score))
@@ -646,21 +666,27 @@ impl GeneticNode for FighterNN {
debug!("mutation_weight_range: {:?}", mutation_weight_range);
let survival_rate = left.survival_rate.lerp(right.survival_rate, lerp_amount);
debug!("survival_rate: {}", survival_rate);
Ok(Box::new(FighterNN {
id: *id,
folder,
generation: 0,
population_size: nn_shapes.len(),
scores: vec![HashMap::new()],
scores: vec![],
crossbreed_segments,
nn_shapes: vec![nn_shapes],
weight_initialization_range,
minor_mutation_rate,
major_mutation_rate,
mutation_weight_range,
id_mapping: vec![HashMap::new()],
id_mapping: vec![],
lerp_amount,
// generational_lenience: left.generational_lenience + MAINLINE_GENERATIONAL_LENIENCE,
generational_lenience: MAINLINE_GENERATIONAL_LENIENCE,
survival_rate,
}))
}
}
@@ -713,7 +739,7 @@ fn should_continue(scores: &[HashMap<u64, f32>], lenience: u64) -> Result<bool,
debug!(
"Highest Q3 value: {} at generation {}, Highest Median value: {} at generation {}, Continuing? {}",
highest_q3_value, generation_with_highest_q3, highest_median, generation_with_highest_median, result
highest_q3_value, generation_with_highest_q3 + 1, highest_median, generation_with_highest_median + 1, result
);
Ok(result)
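Only should_continue's debug line is visible here, but it suggests the stopping rule: remember the generation with the best Q3 (and median) score and keep simulating while the run is within `lenience` generations of that peak. A sketch of such a rule under those assumptions (not the committed implementation):

import numpy as np

def should_continue_sketch(score_history, lenience):
    # score_history: one {individual_id: score} dict per generation
    q3_per_gen = [np.percentile(list(gen.values()), 75) for gen in score_history]
    best_gen = int(np.argmax(q3_per_gen))
    # Continue while the best Q3 was seen within the last `lenience` generations
    return (len(score_history) - 1) - best_gen < lenience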
@@ -767,7 +793,7 @@ async fn run_1v1_simulation(
.await
.with_context(|| format!("Failed to read score from file: {:?}", score_file))?;
debug!(
trace!(
"{} scored {}, while {} scored {}",
nn_1_id, round_score, nn_2_id, opposing_score
);
@@ -790,7 +816,7 @@ async fn run_1v1_simulation(
format!("Failed to read score from file: {:?}", opposite_score_file)
})?;
debug!(
trace!(
"{} scored {}, while {} scored {}",
nn_1_id, round_score, nn_2_id, opposing_score
);
@@ -847,7 +873,7 @@ async fn run_1v1_simulation(
.await
.with_context(|| format!("Failed to read score from file: {:?}", score_file))?;
debug!(
trace!(
"{} scored {}, while {} scored {}",
nn_1_id, round_score, nn_2_id, opposing_score
);
@@ -885,21 +911,17 @@ async fn read_score_from_file(file_path: &Path, nn_id: &str) -> Result<f32, io::
"NN ID not found in scores file",
));
}
Err(e)
if e.kind() == io::ErrorKind::WouldBlock
|| e.kind() == io::ErrorKind::PermissionDenied
|| e.kind() == io::ErrorKind::Other =>
Err(_) =>
{
if attempts >= 5 {
if attempts >= 2 {
// Give up after a few attempts and fall back to a penalty score.
return Err(e);
return Ok(-100.0);
}
attempts += 1;
// wait to ensure the file is written before retrying
tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
tokio::time::sleep(tokio::time::Duration::from_secs(10)).await;
}
Err(e) => return Err(e),
}
}
}


@@ -109,9 +109,9 @@ pub fn consolidate_old_connections(
let primary_shape = primary.get_layer_sizes();
let secondary_shape = secondary.get_layer_sizes();
debug!("Primary shape: {:?}", primary_shape);
debug!("Secondary shape: {:?}", secondary_shape);
debug!("New shape: {:?}", new_shape);
trace!("Primary shape: {:?}", primary_shape);
trace!("Secondary shape: {:?}", secondary_shape);
trace!("New shape: {:?}", new_shape);
// Start by iterating layer by layer
let primary_connections = primary.get_connections();


@@ -6,6 +6,7 @@ use crate::error::Error;
use anyhow::Context;
use async_trait::async_trait;
use log::info;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use std::fmt::Debug;
use uuid::Uuid;
@@ -147,6 +148,8 @@ where
.await
.with_context(|| format!("Error simulating node: {:?}", self))?;
info!("Simulation complete and continuing: {:?}", next_generation);
self.state = if next_generation {
GeneticState::Mutate
} else {

parameter_analysis.py Normal file

@@ -0,0 +1,380 @@
# Importing necessary libraries
import json
import matplotlib.pyplot as plt
from collections import defaultdict
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.colors as mcolors
import matplotlib.cm as cm
import matplotlib.ticker as ticker
# Simplified JSON data for demonstration
with open('gemla/round4.json', 'r') as file:
simplified_json_data = json.load(file)
# Function to traverse the tree to find a node id
def traverse_right_nodes(node):
if node is None:
return []
right_node = node.get("right")
left_node = node.get("left")
if right_node is None and left_node is None:
return []
elif right_node and left_node:
return [right_node] + traverse_right_nodes(left_node)
return []
# Getting most recent right graph
right_nodes = traverse_right_nodes(simplified_json_data[0])
# Heatmaps
# Data structure to store mutation rates, generations, and scores
mutation_rate_data = defaultdict(lambda: defaultdict(list))
# Populate the dictionary with scores indexed by mutation rate and generation
for node in right_nodes:
node_val = node["val"]["node"]
if node_val:
scores = node_val["scores"]
minor_mutation_rate = node_val["minor_mutation_rate"]
generation = node_val["generation"]
# Ensure each score is associated with the correct generation
for gen_index, score_list in enumerate(scores):
for score in score_list.values():
mutation_rate_data[minor_mutation_rate][gen_index].append(score)
# Prepare data for heatmap
max_generation = max(max(gens.keys()) for gens in mutation_rate_data.values())
heatmap_data = np.full((len(mutation_rate_data), max_generation + 1), np.nan)
# Populate the heatmap data with average scores
mutation_rates = sorted(mutation_rate_data.keys())
for i, mutation_rate in enumerate(mutation_rates):
for generation in range(max_generation + 1):
scores = mutation_rate_data[mutation_rate][generation]
if scores: # Check if there are scores for this generation
heatmap_data[i, generation] = np.mean(scores)
# Creating a DataFrame for the heatmap
df_heatmap = pd.DataFrame(
data=heatmap_data,
index=mutation_rates,
columns=range(max_generation + 1)
)
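The aggregate-then-pivot pattern above is repeated below, nearly verbatim, for each scalar hyperparameter (major mutation rate, crossbreed segments, mutation weight range, weight initialization range and skew). A hedged helper that could replace the copies; build_param_heatmap and param_of are illustrative names, not part of the committed script:

from collections import defaultdict
import numpy as np
import pandas as pd

def build_param_heatmap(nodes, param_of):
    # Bucket every score by (parameter value, generation index)
    data = defaultdict(lambda: defaultdict(list))
    for node in nodes:
        node_val = node["val"]["node"]
        if not node_val:
            continue
        key = param_of(node_val)
        for gen_index, score_set in enumerate(node_val["scores"]):
            data[key][gen_index].extend(score_set.values())
    max_gen = max(g for gens in data.values() for g in gens)
    keys = sorted(data)
    grid = np.full((len(keys), max_gen + 1), np.nan)
    for i, k in enumerate(keys):
        for g in range(max_gen + 1):
            if data[k][g]:  # only average generations that actually have scores
                grid[i, g] = np.mean(data[k][g])
    return pd.DataFrame(grid, index=keys, columns=range(max_gen + 1))

# e.g. df_major_heatmap = build_param_heatmap(right_nodes, lambda v: v["major_mutation_rate"])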
# Data structure to store major mutation rates, generations, and scores
major_mutation_rate_data = defaultdict(lambda: defaultdict(list))
# Populate the dictionary with scores indexed by major mutation rate and generation
# This is assuming the structure to retrieve major_mutation_rate is similar to minor_mutation_rate
for node in right_nodes:
node_val = node["val"]["node"]
if node_val:
scores = node_val["scores"]
major_mutation_rate = node_val["major_mutation_rate"]
generation = node_val["generation"]
for gen_index, score_list in enumerate(scores):
for score in score_list.values():
major_mutation_rate_data[major_mutation_rate][gen_index].append(score)
# Prepare the heatmap data for major_mutation_rate similar to minor_mutation_rate
major_heatmap_data = np.full((len(major_mutation_rate_data), max_generation + 1), np.nan)
major_mutation_rates = sorted(major_mutation_rate_data.keys())
for i, major_rate in enumerate(major_mutation_rates):
for generation in range(max_generation + 1):
scores = major_mutation_rate_data[major_rate][generation]
if scores: # Check if there are scores for this generation
major_heatmap_data[i, generation] = np.mean(scores)
# Creating a DataFrame for the major mutation rate heatmap
df_major_heatmap = pd.DataFrame(
data=major_heatmap_data,
index=major_mutation_rates,
columns=range(max_generation + 1)
)
# crossbreed_segments
# Data structure to store crossbreed segment counts, generations, and scores
crossbreed_segments_data = defaultdict(lambda: defaultdict(list))
# Populate the dictionary with scores indexed by crossbreed segments and generation
# This is assuming the structure to retrieve crossbreed_segments is similar to minor_mutation_rate
for node in right_nodes:
node_val = node["val"]["node"]
if node_val:
scores = node_val["scores"]
crossbreed_segments = node_val["crossbreed_segments"]
generation = node_val["generation"]
for gen_index, score_list in enumerate(scores):
for score in score_list.values():
crossbreed_segments_data[crossbreed_segments][gen_index].append(score)
# Prepare the heatmap data for crossbreed_segments similar to minor_mutation_rate
crossbreed_heatmap_data = np.full((len(crossbreed_segments_data), max_generation + 1), np.nan)
crossbreed_segments = sorted(crossbreed_segments_data.keys())
for i, crossbreed_segment in enumerate(crossbreed_segments):
for generation in range(max_generation + 1):
scores = crossbreed_segments_data[crossbreed_segment][generation]
if scores: # Check if there are scores for this generation
crossbreed_heatmap_data[i, generation] = np.mean(scores)
# Creating a DataFrame for the crossbreed segments heatmap
df_crossbreed_heatmap = pd.DataFrame(
data=crossbreed_heatmap_data,
index=crossbreed_segments,
columns=range(max_generation + 1)
)
# mutation_weight_range
# Data structure to store mutation weight ranges, generations, and scores
mutation_weight_range_data = defaultdict(lambda: defaultdict(list))
# Populate the dictionary with scores indexed by mutation weight range and generation
# This is assuming the structure to retrieve mutation_weight_range is similar to minor_mutation_rate
for node in right_nodes:
node_val = node["val"]["node"]
if node_val:
scores = node_val["scores"]
mutation_weight_range = node_val["mutation_weight_range"]
positive_extent = mutation_weight_range["end"]
negative_extent = -mutation_weight_range["start"]
mutation_weight_range = (positive_extent + negative_extent) / 2
generation = node_val["generation"]
for gen_index, score_list in enumerate(scores):
for score in score_list.values():
mutation_weight_range_data[mutation_weight_range][gen_index].append(score)
# Prepare the heatmap data for mutation_weight_range similar to minor_mutation_rate
mutation_weight_range_heatmap_data = np.full((len(mutation_weight_range_data), max_generation + 1), np.nan)
mutation_weight_ranges = sorted(mutation_weight_range_data.keys())
for i, mutation_weight_range in enumerate(mutation_weight_ranges):
for generation in range(max_generation + 1):
scores = mutation_weight_range_data[mutation_weight_range][generation]
if scores: # Check if there are scores for this generation
mutation_weight_range_heatmap_data[i, generation] = np.mean(scores)
# Creating a DataFrame for the mutation weight range heatmap
df_mutation_weight_range_heatmap = pd.DataFrame(
data=mutation_weight_range_heatmap_data,
index=mutation_weight_ranges,
columns=range(max_generation + 1)
)
# weight_initialization_range
# Data structure to store weight initialization ranges, generations, and scores
weight_initialization_range_data = defaultdict(lambda: defaultdict(list))
# Populate the dictionary with scores indexed by weight initialization range and generation
# This is assuming the structure to retrieve weight_initialization_range is similar to minor_mutation_rate
for node in right_nodes:
node_val = node["val"]["node"]
if node_val:
scores = node_val["scores"]
weight_initialization_range = node_val["weight_initialization_range"]
positive_extent = weight_initialization_range["end"]
negative_extent = -weight_initialization_range["start"]
weight_initialization_range = (positive_extent + negative_extent) / 2
generation = node_val["generation"]
for gen_index, score_list in enumerate(scores):
for score in score_list.values():
weight_initialization_range_data[weight_initialization_range][gen_index].append(score)
# Prepare the heatmap data for weight_initialization_range similar to minor_mutation_rate
weight_initialization_range_heatmap_data = np.full((len(weight_initialization_range_data), max_generation + 1), np.nan)
weight_initialization_ranges = sorted(weight_initialization_range_data.keys())
for i, weight_initialization_range in enumerate(weight_initialization_ranges):
for generation in range(max_generation + 1):
scores = weight_initialization_range_data[weight_initialization_range][generation]
if scores: # Check if there are scores for this generation
weight_initialization_range_heatmap_data[i, generation] = np.mean(scores)
# Creating a DataFrame for the weight initialization range heatmap
df_weight_initialization_range_heatmap = pd.DataFrame(
data=weight_initialization_range_heatmap_data,
index=weight_initialization_ranges,
columns=range(max_generation + 1)
)
# weight_initialization_range_skew
# Data structure to store weight initialization range skews, generations, and scores
weight_initialization_range_skew_data = defaultdict(lambda: defaultdict(list))
# Populate the dictionary with scores indexed by weight initialization range skew and generation
# This is assuming the structure to retrieve weight_initialization_range is similar to minor_mutation_rate
for node in right_nodes:
node_val = node["val"]["node"]
if node_val:
scores = node_val["scores"]
weight_initialization_range = node_val["weight_initialization_range"]
positive_extent = weight_initialization_range["end"]
negative_extent = -weight_initialization_range["start"]
weight_initialization_range_skew = (positive_extent - negative_extent) / 2
generation = node_val["generation"]
for gen_index, score_list in enumerate(scores):
for score in score_list.values():
weight_initialization_range_skew_data[weight_initialization_range_skew][gen_index].append(score)
# Prepare the heatmap data for weight_initialization_range_skew similar to minor_mutation_rate
weight_initialization_range_skew_heatmap_data = np.full((len(weight_initialization_range_skew_data), max_generation + 1), np.nan)
weight_initialization_range_skews = sorted(weight_initialization_range_skew_data.keys())
for i, weight_initialization_range_skew in enumerate(weight_initialization_range_skews):
for generation in range(max_generation + 1):
scores = weight_initialization_range_skew_data[weight_initialization_range_skew][generation]
if scores: # Check if there are scores for this generation
weight_initialization_range_skew_heatmap_data[i, generation] = np.mean(scores)
# Creating a DataFrame for the weight initialization range skew heatmap
df_weight_initialization_range_skew_heatmap = pd.DataFrame(
data=weight_initialization_range_skew_heatmap_data,
index=weight_initialization_range_skews,
columns=range(max_generation + 1)
)
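A note on the two derived quantities: for a stored range [start, end], the earlier (positive_extent + negative_extent) / 2 equals (end - start) / 2, the half-width of the range, while the skew computed here, (positive_extent - negative_extent) / 2, equals (end + start) / 2, the range's midpoint. A range symmetric about zero therefore has skew 0.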
# Analyze number of neurons correlation to score
# We can get the number of neurons via node_val["nn_shapes"] which contains an array of maps
# Each map has a key for the individual id and a value which is an array of integers representing the number of neurons in each layer
# We can use the individual id to get the score from the scores array
# We then generate a density map of the number of neurons vs the score
neuron_number_score_data = defaultdict(lambda: defaultdict(list))
for node in right_nodes:
node_val = node["val"]["node"]
if node_val:
scores = node_val["scores"]
nn_shapes = node_val["nn_shapes"]
# scores and nn_shapes are parallel per-generation arrays; scores is one element shorter than nn_shapes
for gen_index, score in enumerate(scores):
for individual_id, nn_shape in nn_shapes[gen_index].items():
neuron_number = sum(nn_shape)
# check if score has a value for the individual id
if individual_id not in score:
continue
neuron_number_score_data[neuron_number][gen_index].append(score[individual_id])
# prepare the density map data
neuron_number_score_heatmap_data = np.full((len(neuron_number_score_data), max_generation + 1), np.nan)
neuron_numbers = sorted(neuron_number_score_data.keys())
for i, neuron_number in enumerate(neuron_numbers):
for generation in range(max_generation + 1):
scores = neuron_number_score_data[neuron_number][generation]
if scores: # Check if there are scores for this generation
neuron_number_score_heatmap_data[i, generation] = np.mean(scores)
# Creating a DataFrame for the neuron count heatmap
df_neuron_number_score_heatmap = pd.DataFrame(
data=neuron_number_score_heatmap_data,
index=neuron_numbers,
columns=range(max_generation + 1)
)
# Analyze number of layers correlation to score
nn_layers_score_data = defaultdict(lambda: defaultdict(list))
for node in right_nodes:
node_val = node["val"]["node"]
if node_val:
scores = node_val["scores"]
nn_shapes = node_val["nn_shapes"]
# scores and nn_shapes are parallel per-generation arrays; scores is one element shorter than nn_shapes
for gen_index, score in enumerate(scores):
for individual_id, nn_shape in nn_shapes[gen_index].items():
layer_number = len(nn_shape)
# check if score has a value for the individual id
if individual_id not in score:
continue
nn_layers_score_data[layer_number][gen_index].append(score[individual_id])
# prepare the density map data
nn_layers_score_heatmap_data = np.full((len(nn_layers_score_data), max_generation + 1), np.nan)
nn_layers = sorted(nn_layers_score_data.keys())
for i, nn_layer in enumerate(nn_layers):
for generation in range(max_generation + 1):
scores = nn_layers_score_data[nn_layer][generation]
if scores: # Check if there are scores for this generation
nn_layers_score_heatmap_data[i, generation] = np.mean(scores)
# Creating a DataFrame for the layer count heatmap
df_nn_layers_score_heatmap = pd.DataFrame(
data=nn_layers_score_heatmap_data,
index=nn_layers,
columns=range(max_generation + 1)
)
# print("Format: ", custom_formatter(0.123498761234, 0))
# Creating subplots
fig, axs = plt.subplots(2, 2, figsize=(20, 14)) # Creates a 2x2 grid of subplots
# Plotting the minor mutation rate heatmap
sns.heatmap(df_heatmap.T, cmap='viridis', fmt=".4g", cbar_kws={'label': 'Mean Score'}, ax=axs[0, 0])
# axs[0, 0].set_title('Minor Mutation Rate')
axs[0, 0].set_xlabel('Minor Mutation Rate')
axs[0, 0].set_ylabel('Generation')
axs[0, 0].invert_yaxis()
# Plotting the major mutation rate heatmap
sns.heatmap(df_major_heatmap.T, cmap='viridis', fmt=".4g", cbar_kws={'label': 'Mean Score'}, ax=axs[0, 1])
# axs[0, 1].set_title('Major Mutation Rate')
axs[0, 1].set_xlabel('Major Mutation Rate')
axs[0, 1].invert_yaxis()
# Plotting the crossbreed_segments heatmap
sns.heatmap(df_crossbreed_heatmap.T, cmap='viridis', fmt=".4g", cbar_kws={'label': 'Mean Score'}, ax=axs[1, 0])
# axs[1, 0].set_title('Crossbreed Segments')
axs[1, 0].set_xlabel('Crossbreed Segments')
axs[1, 0].set_ylabel('Generation')
axs[1, 0].invert_yaxis()
# Plotting the mutation_weight_range heatmap
sns.heatmap(df_mutation_weight_range_heatmap.T, cmap='viridis', fmt=".4g", cbar_kws={'label': 'Mean Score'}, ax=axs[1, 1])
# axs[1, 1].set_title('Mutation Weight Range')
axs[1, 1].set_xlabel('Mutation Weight Range')
axs[1, 1].invert_yaxis()
fig3, axs3 = plt.subplots(1, 2, figsize=(20, 14)) # Creates a 1x2 grid of subplots
# Plotting the weight_initialization_range heatmap
sns.heatmap(df_weight_initialization_range_heatmap.T, cmap='viridis', fmt=".4g", cbar_kws={'label': 'Mean Score'}, ax=axs3[0])
# axs[2, 0].set_title('Weight Initialization Range')
axs3[0].set_xlabel('Weight Initialization Range')
axs3[0].set_ylabel('Generation')
axs3[0].invert_yaxis()
# Plotting the weight_initialization_range_skew heatmap
sns.heatmap(df_weight_initialization_range_skew_heatmap.T, cmap='viridis', fmt=".4g", cbar_kws={'label': 'Mean Score'}, ax=axs3[1])
# axs[2, 1].set_title('Weight Initialization Range Skew')
axs3[1].set_xlabel('Weight Initialization Range Skew')
axs3[1].set_ylabel('Generation')
axs3[1].invert_yaxis()
# Creating a new window for the scatter plots
fig2, axs2 = plt.subplots(2, 1, figsize=(20, 14)) # Creates a 2x1 grid of subplots
# Plotting the neuron number vs score heatmap
sns.heatmap(df_neuron_number_score_heatmap.T, cmap='viridis', fmt=".4g", cbar_kws={'label': 'Mean Score'}, ax=axs2[1])
# axs[3, 1].set_title('Neuron Number vs. Score')
axs2[1].set_xlabel('Neuron Number')
axs2[1].set_ylabel('Generation')
axs2[1].invert_yaxis()
# Plotting the number of layers vs score heatmap
sns.heatmap(df_nn_layers_score_heatmap.T, cmap='viridis', fmt=".4g", cbar_kws={'label': 'Mean Score'}, ax=axs2[0])
# axs[3, 1].set_title('Number of Layers vs. Score')
axs2[0].set_xlabel('Number of Layers')
axs2[0].set_ylabel('Generation')
axs2[0].invert_yaxis()
# Display the plot
plt.tight_layout() # Adjusts the subplots to fit into the figure area.
plt.show()

visualize_networks.py Normal file

@@ -0,0 +1,118 @@
import matplotlib.pyplot as plt
import networkx as nx
import subprocess
import tkinter as tk
from tkinter import filedialog
def select_file():
root = tk.Tk()
root.withdraw() # Hide the main window
file_path = filedialog.askopenfilename(
initialdir="/", # Set the initial directory to search for files
title="Select file",
filetypes=(("Net files", "*.net"), ("All files", "*.*"))
)
return file_path
def get_fann_data(network_file):
# Adjust the path to the Rust executable as needed
result = subprocess.run(['./extract_fann_data/target/debug/extract_fann_data.exe', network_file], capture_output=True, text=True)
if result.returncode != 0:
print("Error:", result.stderr)
return None, None
layer_sizes = []
connections = []
parsing_connections = False
for line in result.stdout.splitlines():
if line.startswith("Layers:"):
continue
elif line.startswith("Connections:"):
parsing_connections = True
continue
if parsing_connections:
from_neuron, to_neuron, weight = map(float, line.split())
connections.append((int(from_neuron), int(to_neuron), weight))
else:
layer_size, bias_count = map(int, line.split())
layer_sizes.append((layer_size, bias_count))
return layer_sizes, connections
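get_fann_data assumes the stdout format produced by the Rust extract_fann_data tool shown earlier: a "Layers:" header followed by "layer_size bias_count" pairs, then a "Connections:" header followed by "from_neuron to_neuron weight" triples. Roughly like this (the values are made up for illustration):

Layers:
18 1
10 1
8 0
Connections:
0 19 0.42
1 19 -1.07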
def visualize_fann_network(network_file):
# Get network data
layer_sizes, connections = get_fann_data(network_file)
if layer_sizes is None or connections is None:
return # Error handling in get_fann_data should provide error output
# Create a directed graph
G = nx.DiGraph()
# Positions dictionary to hold the position of each neuron
pos = {}
node_count = 0
x_spacing = 1.0
y_spacing = 1.0
# Calculate the maximum layer size for proper spacing
max_layer_size = max(size for size, bias in layer_sizes)
# Build nodes and position them layer by layer from left to right
for layer_index, (layer_size, bias_count) in enumerate(layer_sizes):
y_positions = list(range(-layer_size-bias_count+1, 1, 1)) # Center-align vertically
y_positions = [y * (max_layer_size / (layer_size + bias_count)) * y_spacing for y in y_positions] # Adjust spacing
for neuron_index in range(layer_size + bias_count): # Include bias neurons
node_label = f"L{layer_index}N{neuron_index}"
G.add_node(node_count, label=node_label)
pos[node_count] = (layer_index * x_spacing, y_positions[neuron_index % len(y_positions)])
node_count += 1
# Add connections to the graph
for from_neuron, to_neuron, weight in connections:
G.add_edge(from_neuron, to_neuron, weight=weight)
max_weight = max(abs(weight) for _, _, weight in connections)
print(f"Max weight: {max_weight}")
# Draw nodes
nx.draw_networkx_nodes(G, pos, node_color='skyblue', node_size=200)
nx.draw_networkx_labels(G, pos, font_size=7)
# Custom function for edge properties
def adjust_properties(weight):
# if weight > 0:
# print("Weight:", weight)
color = 'green' if weight > 0 else 'red'
alpha = min((abs(weight) / max_weight) ** 3, 1)
# print(f"Color: {color}, Alpha: {alpha}")
return color, alpha
# Draw edges with custom properties
for u, v, d in G.edges(data=True):
color, alpha = adjust_properties(d['weight'])
nx.draw_networkx_edges(G, pos, edgelist=[(u, v)], edge_color=color, alpha=alpha, width=1.5, arrows=False)
# Show plot
plt.title('FANN Network Visualization')
plt.axis('off') # Turn off the axis
plt.show()
# Path to the FANN network file
fann_path = 'F:\\\\vandomej\\Projects\\dootcamp-AI-Simulation\\Simulations\\fighter_nn_4f2be613-ab26-4384-9a65-450e043984ea\\6\\4f2be613-ab26-4384-9a65-450e043984ea_fighter_nn_0.net'
# fann_path = "F:\\\\vandomej\\Projects\\dootcamp-AI-Simulation\\Simulations\\fighter_nn_fc294503-7b2a-40f8-be59-ccc486eb3f79\\0\\fc294503-7b2a-40f8-be59-ccc486eb3f79_fighter_nn_0.net"
# fann_path = 'F:\\\\vandomej\\Projects\\dootcamp-AI-Simulation\\Simulations\\fighter_nn_99c30a7f-40ab-4faf-b16a-b44703fdb6cd\\0\\99c30a7f-40ab-4faf-b16a-b44703fdb6cd_fighter_nn_0.net'
# Has a 4 layer network
# # Generation 1
# fann_path = "F:\\\\vandomej\\Projects\\dootcamp-AI-Simulation\\Simulations\\fighter_nn_16dfa1b4-03c7-45a6-84b4-22fe3c8e2d98\\1\\16dfa1b4-03c7-45a6-84b4-22fe3c8e2d98_fighter_nn_0.net"
# # Generation 5
# fann_path = "F:\\\\vandomej\\Projects\\dootcamp-AI-Simulation\\Simulations\\fighter_nn_16dfa1b4-03c7-45a6-84b4-22fe3c8e2d98\\5\\16dfa1b4-03c7-45a6-84b4-22fe3c8e2d98_fighter_nn_0.net"
# # Generation 10
# fann_path = "F:\\\\vandomej\\Projects\\dootcamp-AI-Simulation\\Simulations\\fighter_nn_16dfa1b4-03c7-45a6-84b4-22fe3c8e2d98\\10\\16dfa1b4-03c7-45a6-84b4-22fe3c8e2d98_fighter_nn_0.net"
# # Generation 20
# fann_path = "F:\\\\vandomej\\Projects\\dootcamp-AI-Simulation\\Simulations\\fighter_nn_16dfa1b4-03c7-45a6-84b4-22fe3c8e2d98\\20\\16dfa1b4-03c7-45a6-84b4-22fe3c8e2d98_fighter_nn_0.net"
# # Generation 32
# fann_path = "F:\\\\vandomej\\Projects\\dootcamp-AI-Simulation\\Simulations\\fighter_nn_16dfa1b4-03c7-45a6-84b4-22fe3c8e2d98\\32\\16dfa1b4-03c7-45a6-84b4-22fe3c8e2d98_fighter_nn_0.net"
fann_path = select_file()
visualize_fann_network(fann_path)


@@ -36,7 +36,7 @@ def hierarchy_pos(G, root=None, width=1., vert_gap=0.2, vert_loc=0, xcenter=0.5)
return _hierarchy_pos(G, root, width, vert_gap, vert_loc, xcenter)
# Simplified JSON data for demonstration
with open('gemla/round2.json', 'r') as file:
with open('gemla/round4.json', 'r') as file:
simplified_json_data = json.load(file)
# Function to traverse the tree and create a graph
@@ -68,7 +68,7 @@ def traverse(node, graph, parent=None):
# print debug statement
# print(f"Node {node_id}: Max score: {overall_max_score:.6f} (Individual {overall_max_score_individual} in Gen {overall_max_score_gen})")
# print(f"Left: {node.get('left')}, Right: {node.get('right')}")
label = f"{node_id}\nGenerations: {generations}, Population: {population_size}\nMax score: {overall_max_score:.6f} (Individual {overall_max_score_individual} in Gen {overall_max_score_gen})"
label = f"{node_id}\nGenerations: {generations}, Population: {population_size}\nMax score: {overall_max_score:.6f} (Individual {overall_max_score_individual} in Gen {overall_max_score_gen + 1 if overall_max_score_gen is not None else 'N/A'})"
else:
label = node_id