Finished round 2 adjustments

parent a11def630a
commit 98803b3700

7 changed files with 236 additions and 73 deletions
@@ -65,6 +65,9 @@ def traverse(node, graph, parent=None):
                 overall_max_score_individual = individual_with_max_score_for_gen
                 overall_max_score_gen = gen
 
+        # print debug statement
+        # print(f"Node {node_id}: Max score: {overall_max_score:.6f} (Individual {overall_max_score_individual} in Gen {overall_max_score_gen})")
+        # print(f"Left: {node.get('left')}, Right: {node.get('right')}")
         label = f"{node_id}\nGenerations: {generations}, Population: {population_size}\nMax score: {overall_max_score:.6f} (Individual {overall_max_score_individual} in Gen {overall_max_score_gen})"
     else:
         label = node_id
@@ -1,9 +1,9 @@
 fn main() {
     // Replace this with the path to the directory containing `fann.lib`
-    let lib_dir = "/opt/homebrew/Cellar/fann/2.2.0/lib";
+    let lib_dir = "F://vandomej/Downloads/vcpkg/packages/fann_x64-windows/lib";
 
     println!("cargo:rustc-link-search=native={}", lib_dir);
-    println!("cargo:rustc-link-lib=dylib=fann");
+    println!("cargo:rustc-link-lib=static=fann");
     // Use `dylib=fann` instead of `static=fann` if you're linking dynamically
 
     // If there are any additional directories where the compiler can find header files, you can specify them like this:
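The two revisions of this build script just swap one hardcoded, machine-specific path and link kind for another. A hedged sketch of how the same script could choose both from the build target instead, using the `CARGO_CFG_TARGET_OS` variable Cargo sets for build scripts; the two paths are simply the values seen in this diff, not canonical install locations:

// Sketch only, not part of the commit: pick the FANN directory and
// link kind from the build target instead of hand-editing build.rs.
fn main() {
    let target_os = std::env::var("CARGO_CFG_TARGET_OS").unwrap_or_default();
    let (lib_dir, link_kind) = if target_os == "windows" {
        // vcpkg layout from the new revision; adjust to your vcpkg root
        ("F://vandomej/Downloads/vcpkg/packages/fann_x64-windows/lib", "static")
    } else {
        // Homebrew layout from the old revision
        ("/opt/homebrew/Cellar/fann/2.2.0/lib", "dylib")
    };
    println!("cargo:rustc-link-search=native={}", lib_dir);
    println!("cargo:rustc-link-lib={}=fann", link_kind);
}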
gemla/round2.json (new file, 1 line)
File diff suppressed because one or more lines are too long
@@ -45,11 +45,12 @@ impl Serialize for FighterContext {
 
 // Custom deserialization to reconstruct the FighterContext from a concurrency limit.
 impl<'de> Deserialize<'de> for FighterContext {
-    fn deserialize<D>(_: D) -> Result<Self, D::Error>
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
     where
         D: Deserializer<'de>,
     {
         // Deserialize the tuple
+        let (_, _) = <(usize, usize)>::deserialize(deserializer)?;
         Ok(FighterContext {
             shared_semaphore: Arc::new(Semaphore::new(SHARED_SEMAPHORE_CONCURRENCY_LIMIT)),
             visible_simulations: Arc::new(Semaphore::new(VISIBLE_SIMULATIONS_CONCURRENCY_LIMIT)),
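The new `deserialize` body consumes the `(usize, usize)` pair so the input stream stays aligned with whatever `Serialize` wrote, then rebuilds both semaphores from compile-time limits: permits held at save time are runtime state and should not be persisted. A self-contained sketch of the presumed round-trip; the limit values and the tuple-writing `serialize` body are assumptions inferred from this hunk, not the crate's verbatim code:

use std::sync::Arc;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use tokio::sync::Semaphore;

// Assumed limits; the real crate defines its own constants.
const SHARED_SEMAPHORE_CONCURRENCY_LIMIT: usize = 3;
const VISIBLE_SIMULATIONS_CONCURRENCY_LIMIT: usize = 1;

struct FighterContext {
    shared_semaphore: Arc<Semaphore>,
    visible_simulations: Arc<Semaphore>,
}

impl Serialize for FighterContext {
    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        // Persist only the limits; in-flight permits are runtime state.
        (SHARED_SEMAPHORE_CONCURRENCY_LIMIT, VISIBLE_SIMULATIONS_CONCURRENCY_LIMIT)
            .serialize(serializer)
    }
}

impl<'de> Deserialize<'de> for FighterContext {
    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
        // Consume the tuple, then rebuild fresh semaphores from the constants.
        let (_, _) = <(usize, usize)>::deserialize(deserializer)?;
        Ok(FighterContext {
            shared_semaphore: Arc::new(Semaphore::new(SHARED_SEMAPHORE_CONCURRENCY_LIMIT)),
            visible_simulations: Arc::new(Semaphore::new(VISIBLE_SIMULATIONS_CONCURRENCY_LIMIT)),
        })
    }
}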
@@ -103,7 +103,9 @@ impl GeneticNode for FighterNN {
         // Create the first generation in this folder
         for i in 0..POPULATION {
             // Filenames are stored in the format of "xxxxxx_fighter_nn_0.net", "xxxxxx_fighter_nn_1.net", etc. Where xxxxxx is the folder name
-            let nn = gen_folder.join(format!("{:06}_fighter_nn_{}.net", context.id, i));
+            let nn = gen_folder
+                .join(format!("{:06}_fighter_nn_{}", context.id, i))
+                .with_extension("net");
 
             // Randomly generate a neural network shape based on constants
             let hidden_layers = thread_rng()
@@ -168,7 +170,8 @@ impl GeneticNode for FighterNN {
             let nn = self_clone
                 .folder
                 .join(format!("{}", self_clone.generation))
-                .join(self_clone.get_individual_id(i as u64));
+                .join(self_clone.get_individual_id(i as u64))
+                .with_extension("net");
             let mut simulations = Vec::new();
 
             // Using the same original nn, repeat the simulation with 5 random nn's from the current generation concurrently
@@ -181,7 +184,8 @@ impl GeneticNode for FighterNN {
 
                 let random_nn = folder
                     .join(format!("{}", generation))
-                    .join(self_clone.get_individual_id(random_nn_index as u64));
+                    .join(self_clone.get_individual_id(random_nn_index as u64))
+                    .with_extension("net");
                 let nn_clone = nn.clone(); // Clone the path to use in the async block
 
                 let future = async move {
@@ -250,6 +254,7 @@ impl GeneticNode for FighterNN {
 
     async fn mutate(&mut self, _context: GeneticNodeContext<Self::Context>) -> Result<(), Error> {
         let survivor_count = (self.population_size as f32 * SURVIVAL_RATE) as usize;
+        let mut nn_sizes = Vec::new();
 
         // Create the new generation folder
         let new_gen_folder = self.folder.join(format!("{}", self.generation + 1));
@@ -262,11 +267,10 @@ impl GeneticNode for FighterNN {
 
         // Remove the 5 nn's with the lowest scores
         let mut sorted_scores: Vec<_> = self.scores[self.generation as usize].iter().collect();
-        sorted_scores.sort_by(|a, b| a.1.partial_cmp(b.1).unwrap());
-        let to_keep = sorted_scores[survivor_count..]
-            .iter()
-            .map(|(k, _)| *k)
-            .collect::<Vec<_>>();
+        sorted_scores.sort_by(|a, b| b.1.partial_cmp(a.1).unwrap());
+        let scores_to_keep: Vec<&(&u64, &f32)> =
+            sorted_scores.iter().take(survivor_count).collect();
+        let to_keep = scores_to_keep.iter().map(|(k, _)| *k).collect::<Vec<_>>();
 
         // Save the remaining 5 nn's to the new generation folder
         for (i, nn_id) in to_keep.iter().enumerate().take(survivor_count) {
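Both orderings keep the same survivors when `survivor_count` is half the population: ascending sort plus the `[survivor_count..]` slice selects the top half just as descending sort plus `take(survivor_count)` does. The rewrite reads directly as "keep the best n" and, more usefully, names `scores_to_keep` so the weighted parent selection later in `mutate` can reuse the survivors' scores. A toy check of the new ordering, with invented values:

// Descending sort puts the best first, so take(n) keeps the top n.
let mut scores = vec![(1u64, 0.2f32), (2, 0.9), (3, 0.5)];
scores.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap());
let kept: Vec<u64> = scores.iter().take(2).map(|(k, _)| *k).collect();
assert_eq!(kept, vec![2, 3]);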
@@ -275,58 +279,94 @@ impl GeneticNode for FighterNN {
                 .join(format!("{}", self.generation))
                 .join(format!("{:06}_fighter_nn_{}.net", self.id, nn_id));
             let new_nn = new_gen_folder.join(format!("{:06}_fighter_nn_{}.net", self.id, i));
+            debug!("Copying nn from {:?} to {:?}", nn_id, i);
             fs::copy(&nn, &new_nn)?;
+            nn_sizes.push(self.nn_shapes.get(nn_id).unwrap().clone());
         }
 
-        // Take the remaining 5 nn's and create 5 new nn's by the following:
+        let weights: HashMap<u64, f32> = scores_to_keep.iter().map(|(k, v)| (**k, **v)).collect();
+
+        debug!("scores: {:?}", scores_to_keep);
+
+        let mut tasks = Vec::new();
+
+        // Take the remaining nn's and create new nn's by the following:
         for i in 0..survivor_count {
-            let nn_id = to_keep[i];
-            let nn = self
+            let self_clone = self.clone();
+
+            // randomly select individual id's sorted scores proportional to their score
+            let nn_id = weighted_random_selection(&weights);
+            let nn = self_clone
                 .folder
-                .join(format!("{}", self.generation))
-                .join(format!("{:06}_fighter_nn_{}.net", self.id, nn_id));
-            let fann = Fann::from_file(&nn).with_context(|| "Failed to load nn")?;
+                .join(format!("{}", self_clone.generation))
+                .join(self_clone.get_individual_id(nn_id))
+                .with_extension("net");
 
             // Load another nn from the current generation and cross breed it with the current nn
-            let cross_nn = self
-                .folder
-                .join(format!("{}", self.generation))
-                .join(format!(
-                    "{:06}_fighter_nn_{}.net",
-                    self.id,
-                    to_keep[thread_rng().gen_range(0..survivor_count)]
-                ));
-            let cross_fann =
-                Fann::from_file(&cross_nn).with_context(|| "Failed to load cross nn")?;
-
-            let mut new_fann = crossbreed(self, &fann, &cross_fann, self.crossbreed_segments)?;
-
-            // For each weight in the 5 new nn's there is a 20% chance of a minor mutation (a random number between -0.1 and 0.1 is added to the weight)
-            // And a 5% chance of a major mutation a new neuron is randomly added to a hidden layer
-            let mut connections = new_fann.get_connections(); // Vector of connections
-            for c in &mut connections {
-                if thread_rng().gen_range(0.0..1.0) < self.minor_mutation_rate {
-                    trace!("Minor mutation on connection {:?}", c);
-                    c.weight += thread_rng().gen_range(self.weight_initialization_range.clone());
-                    trace!("New weight: {}", c.weight);
+            let cross_id = loop {
+                let cross_id = weighted_random_selection(&weights);
+                if cross_id != nn_id {
+                    break cross_id;
                 }
-            }
+            };
 
-            new_fann.set_connections(&connections);
+            let cross_nn = self_clone
+                .folder
+                .join(format!("{}", self_clone.generation))
+                .join(self_clone.get_individual_id(cross_id))
+                .with_extension("net");
 
-            if thread_rng().gen_range(0.0..1.0) < self.major_mutation_rate {
-                new_fann = major_mutation(&new_fann, self.weight_initialization_range.clone())?;
-            }
+            let new_gen_folder = new_gen_folder.clone();
 
-            // Save the new nn's to the new generation folder
-            let new_nn = new_gen_folder.join(format!(
-                "{:06}_fighter_nn_{}.net",
-                self.id,
-                i + survivor_count
-            ));
-            new_fann
-                .save(&new_nn)
-                .with_context(|| "Failed to save nn")?;
+            let future = tokio::task::spawn_blocking(move || -> Result<Vec<u32>, Error> {
+                let fann = Fann::from_file(&nn).with_context(|| "Failed to load nn")?;
+                let cross_fann =
+                    Fann::from_file(&cross_nn).with_context(|| "Failed to load cross nn")?;
+
+                let mut new_fann = crossbreed(
+                    &self_clone,
+                    &fann,
+                    &cross_fann,
+                    self_clone.crossbreed_segments,
+                )?;
+
+                // For each weight in the 5 new nn's there is a 20% chance of a minor mutation (a random number between -0.1 and 0.1 is added to the weight)
+                // And a 5% chance of a major mutation a new neuron is randomly added to a hidden layer
+                let mut connections = new_fann.get_connections(); // Vector of connections
+                for c in &mut connections {
+                    if thread_rng().gen_range(0.0..1.0) < self_clone.minor_mutation_rate {
+                        trace!("Minor mutation on connection {:?}", c);
+                        c.weight +=
+                            thread_rng().gen_range(self_clone.weight_initialization_range.clone());
+                        trace!("New weight: {}", c.weight);
+                    }
+                }
+
+                new_fann.set_connections(&connections);
+
+                if thread_rng().gen_range(0.0..1.0) < self_clone.major_mutation_rate {
+                    new_fann =
+                        major_mutation(&new_fann, self_clone.weight_initialization_range.clone())?;
+                }
+
+                let new_nn = new_gen_folder
+                    .join(self_clone.get_individual_id((i + survivor_count) as u64))
+                    .with_extension("net");
+                new_fann
+                    .save(&new_nn)
+                    .with_context(|| "Failed to save nn")?;
+
+                Ok::<Vec<u32>, Error>(new_fann.get_layer_sizes())
+            });
+
+            tasks.push(future);
+        }
+
+        let results = join_all(tasks).await;
+
+        for result in results.into_iter() {
+            let new_size = result.with_context(|| "Failed to create new nn")??;
+            nn_sizes.push(new_size);
         }
 
         self.generation += 1;
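The offspring loop changes selection policy and shape at once: parents are now drawn in proportion to score by `weighted_random_selection` rather than uniformly from `to_keep`, a retry loop guarantees two distinct parents, and the FANN loading, crossbreeding, mutation, and saving move onto Tokio's blocking thread pool so the async executor is not stalled by file work. Reduced to a skeleton, the concurrency pattern is the sketch below, assuming `anyhow`-style `Error` and `Context` as the `with_context` calls suggest; the closure body is a stand-in for the real work:

use anyhow::{Context, Error};
use futures::future::join_all;

async fn run_batch() -> Result<Vec<u32>, Error> {
    let mut tasks = Vec::new();
    for i in 0..4u32 {
        // Stand-in for Fann::from_file + crossbreed + mutate + save.
        tasks.push(tokio::task::spawn_blocking(move || -> Result<u32, Error> {
            Ok(i * 2)
        }));
    }
    let mut sizes = Vec::new();
    for result in join_all(tasks).await {
        // Outer ? unwraps the JoinError (panicked/cancelled task),
        // inner ? the task's own Result -- hence the double ?? above.
        sizes.push(result.with_context(|| "task failed")??);
    }
    Ok(sizes)
}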
@@ -351,7 +391,7 @@ impl GeneticNode for FighterNN {
         let get_highest_scores = |fighter: &FighterNN| -> Vec<(u64, f32)> {
             let mut sorted_scores: Vec<_> =
                 fighter.scores[fighter.generation as usize].iter().collect();
-            sorted_scores.sort_by(|a, b| a.1.partial_cmp(b.1).unwrap());
+            sorted_scores.sort_by(|a, b| b.1.partial_cmp(a.1).unwrap());
             sorted_scores
                 .iter()
                 .take(fighter.population_size / 2)
@@ -367,18 +407,25 @@ impl GeneticNode for FighterNN {
 
         let mut simulations = Vec::new();
 
-        for _ in 0..max(left.population_size, right.population_size) * SIMULATION_ROUNDS {
-            let left_nn_id = left_scores[thread_rng().gen_range(0..left_scores.len())].0;
-            let right_nn_id = right_scores[thread_rng().gen_range(0..right_scores.len())].0;
+        let left_weights: HashMap<u64, f32> = left_scores.iter().map(|(k, v)| (*k, *v)).collect();
+        let right_weights: HashMap<u64, f32> = right_scores.iter().map(|(k, v)| (*k, *v)).collect();
+
+        let num_simulations = max(left.population_size, right.population_size) * SIMULATION_ROUNDS;
+
+        for _ in 0..num_simulations {
+            let left_nn_id = weighted_random_selection(&left_weights);
+            let right_nn_id = weighted_random_selection(&right_weights);
+
             let left_nn_path = left
                 .folder
                 .join(left.generation.to_string())
-                .join(left.get_individual_id(left_nn_id));
+                .join(left.get_individual_id(left_nn_id))
+                .with_extension("net");
             let right_nn_path = right
                 .folder
                 .join(right.generation.to_string())
-                .join(right.get_individual_id(right_nn_id));
+                .join(right.get_individual_id(right_nn_id))
+                .with_extension("net");
             let semaphore_clone = gemla_context.shared_semaphore.clone();
             let display_simulation_semaphore = gemla_context.visible_simulations.clone();
 
@@ -414,8 +461,8 @@ impl GeneticNode for FighterNN {
             join_all(simulations).await.into_iter().collect();
         let scores = results?;
 
-        let total_left_score = scores.iter().map(|(l, _)| l).sum::<f32>();
-        let total_right_score = scores.iter().map(|(_, r)| r).sum::<f32>();
+        let total_left_score = scores.iter().map(|(l, _)| l).sum::<f32>() / num_simulations as f32;
+        let total_right_score = scores.iter().map(|(_, r)| r).sum::<f32>() / num_simulations as f32;
 
         debug!("Total left score: {}", total_left_score);
         debug!("Total right score: {}", total_right_score);
@@ -545,6 +592,33 @@ impl FighterNN {
     }
 }
 
+fn weighted_random_selection<T: Clone + std::hash::Hash + Eq>(weights: &HashMap<T, f32>) -> T {
+    let mut rng = thread_rng();
+
+    // Identify the minimum weight
+    let min_weight = weights.values().fold(f32::INFINITY, |a, &b| a.min(b));
+
+    // Adjust all weights to be non-negative
+    let offset = if min_weight < 0.0 {
+        (-min_weight) + 0.5
+    } else {
+        0.0
+    };
+    let total_weight: f32 = weights.values().map(|w| w + offset).sum();
+
+    let mut cumulative_weight = 0.0;
+    let random_weight = rng.gen::<f32>() * total_weight;
+
+    for (item, weight) in weights.iter() {
+        cumulative_weight += *weight + offset;
+        if cumulative_weight >= random_weight {
+            return item.clone();
+        }
+    }
+
+    panic!("Weighted random selection failed.");
+}
+
 async fn run_1v1_simulation(
     nn_path_1: &Path,
     nn_path_2: &Path,
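The new helper is roulette-wheel selection generalized to scores that may be negative. When the minimum weight is below zero, every weight is shifted by offset = -min + 0.5, so key i is drawn with probability (w_i + offset) / Σ_j (w_j + offset), and even the worst key keeps 0.5 units of mass instead of zero. Worked example with invented weights {A: -4.0, B: 1.0}: min = -4.0, offset = 4.5, adjusted masses 0.5 and 5.5, total 6.0, so P(A) = 0.5/6.0 ≈ 0.083 and P(B) = 5.5/6.0 ≈ 0.917. The closing `panic!` is a defensive fallback: `rng.gen::<f32>()` lies in [0, 1), so `random_weight` stays strictly below `total_weight` and the cumulative sum reaches it by the final key, barring floating-point edge cases.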
@@ -602,7 +676,13 @@ async fn run_1v1_simulation(
     let config2_arg = format!("-NN2Config=\"{}\"", nn_path_2.to_str().unwrap());
     let disable_unreal_rendering_arg = "-nullrhi".to_string();
 
-    // debug!("the following command {} {} {} {}", GAME_EXECUTABLE_PATH, config1_arg, config2_arg, disable_unreal_rendering_arg);
+    trace!(
+        "Executing the following command {} {} {} {}",
+        GAME_EXECUTABLE_PATH,
+        config1_arg,
+        config2_arg,
+        disable_unreal_rendering_arg
+    );
 
     trace!("Running simulation for {} vs {}", nn_1_id, nn_2_id);
 
@@ -696,3 +776,56 @@ async fn read_score_from_file(file_path: &Path, nn_id: &str) -> Result<f32, io::
         }
     }
 }
+
+#[cfg(test)]
+pub mod test {
+    use super::*;
+
+    #[test]
+    fn test_weighted_random_selection() {
+        let weights = vec![
+            (43, -4.0403514),
+            (26, -2.9386168),
+            (44, -2.8106647),
+            (46, -1.3942022),
+            (23, 0.99386656),
+            (41, -2.2198126),
+            (48, 1.2195103),
+            (42, -3.4927247),
+            (7, -1.092067),
+            (0, -0.3878999),
+            (49, -4.156101),
+            (34, -0.33209237),
+            (30, -2.7059758),
+            (2, -2.251783),
+            (20, -0.5811202),
+            (10, -3.047954),
+            (6, -4.3464293),
+            (39, -3.7280478),
+            (1, -3.4291298),
+            (11, -2.0568254),
+            (24, -1.5701149),
+            (8, -1.5029285),
+            (3, -2.4728038),
+            (4, 3.7312133),
+            (25, -1.227466),
+        ]
+        .into_iter()
+        .collect();
+
+        let mut ids = vec![
+            43, 26, 44, 46, 23, 41, 48, 42, 7, 0, 49, 34, 30, 2, 20, 10, 6, 39, 1, 11, 24, 8, 3, 4,
+            25,
+        ];
+
+        for _ in 0..10000 {
+            let id = weighted_random_selection(&weights);
+
+            ids = ids.into_iter().filter(|&x| x != id).collect();
+
+            assert!(weights.contains_key(&id));
+        }
+
+        assert_eq!(ids.len(), 0);
+    }
+}
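The test is statistical rather than exact: across 10,000 draws it asserts that every id returned is a valid key and, via the shrinking `ids` list, that each of the 25 ids is selected at least once. The 0.5 offset floor makes this robust; even the lowest-scoring id (6, at -4.3464293) retains a small but non-negligible per-draw probability of roughly 0.5 divided by the total adjusted mass, so missing it across 10,000 draws is vanishingly unlikely. A seeded RNG would make the test fully deterministic, at the cost of threading an explicit `Rng` through `weighted_random_selection`.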
@@ -9,7 +9,9 @@ use rand::{
     thread_rng, Rng,
 };
 
-use super::{FighterNN, NEURAL_NETWORK_HIDDEN_LAYER_SIZE_MIN};
+use super::{
+    FighterNN, NEURAL_NETWORK_HIDDEN_LAYER_SIZE_MAX, NEURAL_NETWORK_HIDDEN_LAYER_SIZE_MIN,
+};
 
 /// Crossbreeds two neural networks of different shapes by finding cut points, and swapping neurons between the two networks.
 /// Algorithm tries to ensure similar functionality is maintained between the two networks.
@@ -109,6 +111,7 @@ pub fn consolidate_old_connections(
     let secondary_shape = secondary.get_layer_sizes();
     debug!("Primary shape: {:?}", primary_shape);
     debug!("Secondary shape: {:?}", secondary_shape);
+    debug!("New shape: {:?}", new_shape);
 
     // Start by iterating layer by later
     let primary_connections = primary.get_connections();
@@ -564,6 +567,25 @@ pub fn crossbreed_neuron_arrays(
         .filter(|&(_, _, layer, _)| layer_counts[layer] >= NEURAL_NETWORK_HIDDEN_LAYER_SIZE_MIN)
         .collect::<Vec<_>>();
 
+    // If a layer has more than NEURAL_NETWORK_HIDDEN_LAYER_SIZE_MAX, remove the neurons with the highest id
+    for layer in 1..layer_counts.len() - 1 {
+        let new_neurons_clone = new_neurons.clone();
+        let layer_neurons = new_neurons_clone
+            .iter()
+            .filter(|(_, _, l, _)| l == &layer)
+            .collect::<Vec<_>>();
+        if layer_neurons.len() > NEURAL_NETWORK_HIDDEN_LAYER_SIZE_MAX {
+            let mut sorted_neurons = layer_neurons.clone();
+            // Take primary neurons first, order by highest id
+            sorted_neurons.sort_by(|a, b| a.1.cmp(&b.1).then(a.0.cmp(&b.0)));
+            let neurons_to_remove = sorted_neurons.len() - NEURAL_NETWORK_HIDDEN_LAYER_SIZE_MAX;
+            for _ in 0..neurons_to_remove {
+                let neuron_to_remove = sorted_neurons.pop().unwrap();
+                new_neurons.retain(|neuron| neuron != neuron_to_remove);
+            }
+        }
+    }
+
     // Collect and sort unique layer numbers
     let mut unique_layers = new_neurons
         .iter()
@@ -606,7 +628,7 @@ pub fn major_mutation(fann: &Fann, weight_initialization_range: Range<f32>) -> R
         .collect::<Vec<_>>();
 
     // Determine first whether to add or remove a neuron
-    if thread_rng().gen_range(0..2) == 0 {
+    if thread_rng().gen_bool(0.5) {
         // To add a neuron we need to create a new fann object with the new layer sizes, then copy the information and connections over
         let max_id = mutated_neurons
             .iter()
@@ -616,9 +638,12 @@ pub fn major_mutation(fann: &Fann, weight_initialization_range: Range<f32>) -> R
 
         // Now we inject the new neuron into mutated_neurons
         let layer = thread_rng().gen_range(1..fann.get_num_layers() - 1) as usize;
-        let new_id = max_id + 1;
-        mutated_neurons.push((new_id, true, layer, new_id));
-        mutated_shape[layer] += 1;
+        // Do not add to layer if it would result in more than NEURALNETWORK_HIDDEN_LAYER_SIZE_MAX neurons
+        if mutated_shape[layer] < NEURAL_NETWORK_HIDDEN_LAYER_SIZE_MAX as u32 {
+            let new_id = max_id + 1;
+            mutated_neurons.push((new_id, true, layer, new_id));
+            mutated_shape[layer] += 1;
+        }
     } else {
         // Remove a neuron
         let layer = thread_rng().gen_range(1..fann.get_num_layers() - 1) as usize;
@@ -260,10 +260,10 @@ where
                 && r.val.state() == GeneticState::Finish =>
             {
                 info!("Merging nodes {} and {}", l.val.id(), r.val.id());
-                if let (Some(left_node), Some(right_node)) = (l.val.take(), r.val.take()) {
+                if let (Some(left_node), Some(right_node)) = (l.val.as_ref(), r.val.as_ref()) {
                     let merged_node = GeneticNode::merge(
-                        &left_node,
-                        &right_node,
+                        left_node,
+                        right_node,
                         &tree.val.id(),
                         gemla_context.clone(),
                     )
@@ -283,9 +283,9 @@ where
             (Some(l), None) if l.val.state() == GeneticState::Finish => {
                 trace!("Copying node {}", l.val.id());
 
-                if let Some(left_node) = l.val.take() {
+                if let Some(left_node) = l.val.as_ref() {
                     GeneticNodeWrapper::from(
-                        left_node,
+                        left_node.clone(),
                         tree.val.max_generations(),
                         tree.val.id(),
                     );
@@ -295,9 +295,9 @@ where
             (None, Some(r)) if r.val.state() == GeneticState::Finish => {
                 trace!("Copying node {}", r.val.id());
 
-                if let Some(right_node) = r.val.take() {
+                if let Some(right_node) = r.val.as_ref() {
                     tree.val = GeneticNodeWrapper::from(
-                        right_node,
+                        right_node.clone(),
                         tree.val.max_generations(),
                         tree.val.id(),
                     );
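The `take()` → `as_ref()` change in these three match arms is the behavioral point of the hunk: `take()` moves the node out of the wrapper and leaves `None` behind, emptying the children as a side effect of merging or copying, while `as_ref()` borrows, cloning where an owned node is needed, and leaves them intact. A toy illustration of the Option semantics being relied on:

let mut slot = Some(5);
assert_eq!(slot.as_ref(), Some(&5)); // borrows; slot is still Some(5)
assert_eq!(slot.take(), Some(5));    // moves the value out...
assert_eq!(slot, None);              // ...leaving None behind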