Commit 02204a2f authored by Štěpán Ošlejšek

HW02 - Constrained optimization

parent 29961c8e
@@ -5,5 +5,10 @@
- Navigate to the folder `{name-of-your-folder}/EOALib/test/TSP` and run `julia hw01_startup.jl`
- This will open the Jupyter notebook menu and open the `hw01.ipynb` file.
## Getting started: HW02 - Constrained optimization
- Make a directory and unzip the package archive there
- Navigate to the folder `{name-of-your-folder}/EOALib/test/HW2` and run `julia hw02_startup.jl`
- This will open the Jupyter notebook menu and open the `hw02.ipynb` file.
@@ -18,6 +18,11 @@ mutable struct EvolutionStrategy
end
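# Multi-objective variant of the ES loop: every 10 generations the current first
# non-dominated front (its individuals, fitness values and the number of function
# evaluations) is logged, and the final first front is returned together with its
# fitness values and the log history.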
function (es::EvolutionStrategy)(fitness_func::MultiObjectiveFitnessFunction, initial_population::Population)
log_history = Dict{String, Any}()
log_history["best_fitness"] = []
log_history["function_evaluations"] = []
log_history["best_individual"] = []
population = initial_population
population.fitness_values = fitness_func(population)
@@ -26,10 +31,17 @@ function (es::EvolutionStrategy)(fitness_func::MultiObjectiveFitnessFunction, in
offspring = es.crossover(parents, es.λ)
es.mutation(offspring)
offspring.fitness_values = fitness_func(offspring)
-population = es.replacement(parents, offspring)
+population = es.replacement(population, offspring)
if generation % 10 == 0
fronts = calculate_non_dominated_fronts(population)
push!(log_history["function_evaluations"], generation * es.λ)
push!(log_history["best_fitness"], population[fronts[1]].fitness_values)
push!(log_history["best_individual"], population[fronts[1]])
end
end
fronts = calculate_non_dominated_fronts(population)
-return population[fronts[1]]
+return population[fronts[1]], population[fronts[1]].fitness_values, log_history
end
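# Illustrative usage of the multi-objective method above (editor's sketch, not part of
# the commit); `f` and `pop` stand for an already constructed
# MultiObjectiveFitnessFunction and initial Population:
#
#   front, front_fitness, history = es(f, pop)
#   # front         -> Population holding the final first non-dominated front
#   # front_fitness -> its fitness values
#   # history       -> the Dict logged every 10 generations above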
function (es::EvolutionStrategy)(fitness_func::FitnessFunction, initial_population::Population)
@@ -88,6 +100,10 @@ function (es::EvolutionStrategy)(fitness_func::FitnessFunction, initial_populati
violations = sum([c(population).^2 for c in constraints])
best_individual = copy(population.members[argmin(population.fitness_values[1] + rg*violations)])
best_fitness = minimum(population.fitness_values[1]+rg*violations)
# Don't know why this only occurs in the Jupyter notebook (or I'm just unlucky and it happens to work on the local machine :))
if isnan(best_fitness)
best_fitness = Inf
end
feasible_count = 0
infeasible_count = 0
@@ -127,12 +143,11 @@ function (es::EvolutionStrategy)(fitness_func::FitnessFunction, initial_populati
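# Stochastic ranking (Runarsson & Yao) over the combined parent+offspring population:
# the third argument is presumably the probability Pf = 0.4 of comparing two individuals
# purely by objective value, otherwise they are compared by constraint violation.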
ranks = get_stochastic_ranks(population+offspring, violations, 0.4)
population = es.replacement(population, offspring, ranks)
# Logging
-if generation % 1 == 0
+if generation % 10 == 0
push!(log_history["function_evaluations"], generation * es.λ)
push!(log_history["best_fitness"], best_fitness)
push!(log_history["best_individual"], best_individual)
end
# println(best_fitness)
# Constraint penalization
p = Population([best_individual], [[best_fitness]], offspring.lb, offspring.ub)
violations_of_best = sum(c(p).^2 for c in constraints)
@@ -151,8 +166,6 @@ function (es::EvolutionStrategy)(fitness_func::FitnessFunction, initial_populati
elseif infeasible_count == 4
rg = β₂ * rg
end
end
return best_individual, best_fitness, log_history
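# Editor's note (sketch, not part of the commit): `rg` is the adaptive penalty factor
# used above in `fitness + rg * violations`. The visible branch strengthens the penalty
# when the best individual has been infeasible for 4 consecutive generations; the
# complementary branch, which lies outside this hunk, presumably relaxes it when the best
# has been feasible for 4 consecutive generations, in the spirit of the classic
# adaptive-penalty rule:
#
#   if feasible_count == 4            # assumed complementary branch (not shown in the diff)
#       rg = rg / relaxation_factor   # hypothetical name for the relaxation constant
#   elseif infeasible_count == 4
#       rg = β₂ * rg                  # strengthen the penalty (as in the diff above)
#   end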
@@ -168,7 +168,15 @@ function (f::g11_instance)(population::Population)
return [chromosome.genes[1]^2 + (chromosome.genes[2]-1)^2 for chromosome in population.members]
end
struct g24_instance <: FitnessFunction
end
function (f::g24_instance)(population::Population)
return [-chromosome.genes[1] - chromosome.genes[2] for chromosome in population.members]
end
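# Editor's sketch (illustrative, not from the library): g24 is the standard two-variable
# benchmark min f(x) = -x1 - x2 on 0 <= x1 <= 3, 0 <= x2 <= 4. Its two inequality
# constraints (both of the "g(x) <= 0" form) can be written as population-level functions
# in the same style as the objective above; wrapping them into `Constraint` objects is
# left to the caller:
g24_constraint1(population::Population) =
    [-2 * c.genes[1]^4 + 8 * c.genes[1]^3 - 8 * c.genes[1]^2 + c.genes[2] - 2 for c in population.members]
g24_constraint2(population::Population) =
    [-4 * c.genes[1]^4 + 32 * c.genes[1]^3 - 88 * c.genes[1]^2 + 96 * c.genes[1] + c.genes[2] - 36 for c in population.members]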
export FitnessFunction, OneMax, LABS, Sphere, Rosenbrock, Linear, Step, Rastrigin, Griewank, Schwefel, TSPFitnessFunction
-export MultiObjectiveFitnessFunction, Constraint, g06_instance, g08_instance, g11_instance
+export MultiObjectiveFitnessFunction, Constraint, g06_instance, g08_instance, g11_instance, g24_instance
Base.:+(f::Function, g::Function) = x -> f(x) + g(x)
Base.:+(g1::Constraint, g2::Constraint) = Constraint("<=", g1.fun + g2.fun)
end
\ No newline at end of file
module ReplacementStrategies
using ..Chromosomes
using ..Selections
abstract type ReplacementStrategy end
@@ -40,7 +41,22 @@ end
struct UnionReplacement <: ReplacementStrategy end
function (f::UnionReplacement)(population::Population, offspring::Population)
-return population + offspring
+combined_population = population + offspring
selected = Vector{Int64}(undef, length(population))
selected_count = 0
fronts = calculate_non_dominated_fronts(combined_population)
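# NSGA-II style survivor selection: fill the next population front by front from the
# combined parent+offspring pool; a front that does not fit completely is truncated by
# crowding distance below.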
for front in fronts
if length(front) + selected_count <= length(population)
selected[selected_count+1:selected_count+length(front)] = front
selected_count += length(front)
else
# The last front does not fit completely: keep only its least crowded members and stop;
# without the break, later fronts could overwrite this part of the selection.
crowding_distances = calculate_crowding_distance(combined_population, front)
sorted_idx = sortperm(crowding_distances, rev=true)
front = front[sorted_idx]
selected[selected_count+1:length(population)] = front[1:length(population)-selected_count]
break
end
end
return combined_population[selected]
end
export ReplacementStrategy, GenerationalReplacement, FirstImprovingLocalSearchReplacement, SteadyStateTruncationReplacement
@@ -25,25 +25,49 @@ function (f::IdentitySelection)(population::Population, num_selected::Int64, pen
end
struct BinaryTournament <: Selection
-num_selected::Int64
+# num_selected::Int64
+tournament_output_size::Int64
end
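# Crowded binary tournament (NSGA-II style): draw two random individuals, prefer the one
# lying on the better (lower-index) non-dominated front, and break ties within the same
# front by the larger crowding distance.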
function (f::BinaryTournament)(population::Population, num_selected::Int64, penalized_fitness::Vector{Float64})
-selected = Vector{Int64}(undef, f.num_selected)
-selected_count = 0
fronts = calculate_non_dominated_fronts(population)
-for front in fronts
-if length(front) + selected_count <= f.num_selected
-selected[selected_count+1:selected_count+length(front)] = front
-selected_count += length(front)
+selected = Vector{Int64}(undef, f.tournament_output_size)
+for i = 1:f.tournament_output_size
+tournament = rand(1:length(population), 2)
+no_front1 = findfirst(x -> tournament[1] in x, fronts)
+no_front2 = findfirst(x -> tournament[2] in x, fronts)
+if no_front1 < no_front2
+selected[i] = tournament[1]
+elseif no_front2 < no_front1
+selected[i] = tournament[2]
else
-crowding_distances = calculate_crowding_distance(population, front)
-sorted_idx = sortperm(crowding_distances, rev=true)
-front = front[sorted_idx]
-selected[selected_count+1:num_selected] = front[1:f.num_selected-selected_count]
+cd1 = calculate_crowding_distance(population, fronts[no_front1])[findfirst(x->x==tournament[1],fronts[no_front1])]
+cd2 = calculate_crowding_distance(population, fronts[no_front2])[findfirst(x->x==tournament[2],fronts[no_front2])]
+if cd2 > cd1
+selected[i] = tournament[2]
+else
+selected[i] = tournament[1]
+end
end
end
return population[selected]
# selected = Vector{Int64}(undef, f.tournament_output_size)
# selected_count = 0
# fronts = calculate_non_dominated_fronts(population)
# for front in fronts
# @show length(front)
# if length(front) + selected_count <= f.tournament_output_size
# selected[selected_count+1:selected_count+length(front)] = front
# selected_count += length(front)
# else
# crowding_distances = calculate_crowding_distance(population, front)
# sorted_idx = sortperm(crowding_distances, rev=true)
# front = front[sorted_idx]
# selected[selected_count+1:f.tournament_output_size] = front[1:f.tournament_output_size-selected_count]
# end
# end
# return population[selected]
end
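# Note on `calculate_crowding_distance` (used by BinaryTournament above and by
# UnionReplacement): it is assumed to follow the standard NSGA-II definition, i.e. for
# each objective the front is sorted by that objective, boundary individuals receive an
# infinite distance, and interior individuals accumulate the normalised gap between their
# two neighbours, so larger values mean less crowded regions.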
function calculate_non_dominated_fronts(population::Population)
@@ -143,6 +167,6 @@ function get_stochastic_ranks(population::Population, penalties::Vector{Float64}
end
export Selection, TournamentSelection, IdentitySelection, BinaryTournament, calculate_non_dominated_fronts, StochasticRankingSelection
-export get_stochastic_ranks
+export get_stochastic_ranks, calculate_crowding_distance
end
\ No newline at end of file
{
"cells": [],
"metadata": {},
"nbformat": 4,
"nbformat_minor": 5
}
using Pkg
Pkg.activate("../../../EOALib")
Pkg.instantiate()
using IJulia
notebook(dir=".")
\ No newline at end of file
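# compute_erd: empirical run-time distribution. For a grid of evaluation budgets it
# estimates, over all supplied run histories, the probability that the target fitness
# value has already been reached within that budget.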
function compute_erd(histories, target_fitness_value)
n_runs = length(histories)
max_evals = maximum(maximum(h["function_evaluations"]) for h in histories)
step = max_evals/20
eval_points = 0:step:max_evals
probabilities = Float64[]
for eval_point in eval_points
successful_runs = 0
for history in histories
# Find first index where we reach target fitness
eval_indices = history["function_evaluations"]
fitness_values = history["best_fitness"]
# Check if target was reached by this evaluation point
for (eval_idx, fitness) in zip(eval_indices, fitness_values)
if eval_idx > eval_point
break
end
if fitness <= target_fitness_value
successful_runs += 1
break
end
end
end
push!(probabilities, successful_runs / n_runs)
end
return eval_points, probabilities
end
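# NOTE: the plotting helpers below assume that a Makie backend (e.g. CairoMakie or
# GLMakie) is loaded by the calling notebook; Figure, Axis, cgrad, lines!, scatter!,
# boxplot!, hlines! and axislegend all come from Makie.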
function plot_optimization_results(fig_size, instances_names, N, thresholds, opt_histories)
fig = Figure(size=fig_size)
num_of_algorithms = length(opt_histories[1])
algo_names = collect(keys(opt_histories[1]))
colors = cgrad(:seaborn_crest_gradient, num_of_algorithms, categorical=true)
symbols = [:utriangle, :star5, :diamond, :hexagon, :circle, :rect, :cross, :xcross, :dtriangle, :ltriangle, :rtriangle, :pentagon, :star4, :star6, :star8][1:num_of_algorithms]
ax_fitness = [Axis(fig[1,i], xlabel="#f-evals [-]", ylabel="Fitness value [-]", title="Instance "*instances_names[i], xminorgridvisible=true, xtickalign=1, yminorgridvisible=true, ytickalign=1) for i=1:length(instances_names)]
ax_erd = [Axis(fig[2,i], xlabel="#f-evals [-]", ylabel="Prob of success [-]", xminorgridvisible=true, xtickalign=1, yminorgridvisible=true, ytickalign=1) for i=1:length(instances_names)]
ax_box = [Axis(fig[3,i], xlabel="Algorithm [-]", ylabel="Final fitness value [-]", xminorgridvisible=true, xtickalign=1, yminorgridvisible=true, ytickalign=1, xticks=(1:num_of_algorithms, algo_names)) for i=1:length(instances_names)]
erd_thresholds = thresholds
for i = 1:length(instances_names)
ending_fitness_values = []
ending_algo_category = []
for k = 1:num_of_algorithms
for j = 1:N
lines!(ax_fitness[i], opt_histories[i][algo_names[k]][j]["function_evaluations"], opt_histories[i][algo_names[k]][j]["best_fitness"], color=colors[k], alpha=0.5, label=algo_names[k])
push!(ending_fitness_values, opt_histories[i][algo_names[k]][j]["best_fitness"][end])
push!(ending_algo_category, k)
end
num_evals, probs = compute_erd(opt_histories[i][algo_names[k]], erd_thresholds[i])
lines!(ax_erd[i], num_evals, probs, color=colors[k], label=algo_names[k])
scatter!(ax_erd[i], num_evals, probs, color=colors[k], marker=symbols[k])
end
boxplot!(ax_box[i], ending_algo_category, ending_fitness_values, color=colors[1])
hlines!(ax_fitness[i], erd_thresholds[i], color=:black, label="Threshold")
axislegend(ax_fitness[i], unique=true)
axislegend(ax_erd[i], unique=true, position=:lt)
end
return fig
end
function plot_optimization_results_mo(fig_size, instances_names, N, opt_histories)
fig = Figure(size=fig_size)
num_of_algorithms = length(opt_histories[1])
algo_names = collect(keys(opt_histories[1]))
colors = cgrad(:seaborn_crest_gradient, N, categorical=true)
symbols = [:utriangle, :star5, :diamond, :hexagon, :circle, :rect, :cross, :xcross, :dtriangle, :ltriangle, :rtriangle, :pentagon, :star4, :star6, :star8]
ax_fitness = [Axis(fig[1,i], xlabel="Fitness value [-]", ylabel="Constraint violation value [-]", title="Instance "*instances_names[i], xminorgridvisible=true, xtickalign=1, yminorgridvisible=true, ytickalign=1) for i=1:length(instances_names)]
for i = 1:length(instances_names)
ending_fitness_values = []
ending_algo_category = []
for k = 1:num_of_algorithms
for j = 1:N
scatter!(ax_fitness[i], opt_histories[i][algo_names[k]][j]["best_fitness"][end][1],opt_histories[i][algo_names[k]][j]["best_fitness"][end][2], color=colors[j], alpha=0.75, label=algo_names[k])
end
end
axislegend(ax_fitness[i], unique=true)
end
return fig
end
\ No newline at end of file
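# Editor's sketch (illustrative, hypothetical names): the plotting helpers above expect
# one log-history Dict per run, grouped by algorithm name and by instance, e.g.
#
#   histories_g06 = Dict("ES + stochastic ranking" => [h1, h2, h3])
#   fig = plot_optimization_results((1200, 900), ["g06"], 3, [threshold_g06], [histories_g06])
#
# where each h_j is the `log_history` returned by an EvolutionStrategy run and
# threshold_g06 is the target fitness used for the run-time distribution plot.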
@@ -16,7 +16,7 @@ end
μ = 200
λ = 400
-generations = 50
+generations = 200
dimension = 2
lb = -5.0
ub = 5.0
@@ -14,8 +14,8 @@
μ = 200
-λ = 200
-generations = 1000
+λ = 400
+generations = 100
dimension = 2
lb = -5.0
ub = 5.0
@@ -31,9 +31,10 @@ es = EvolutionStrategy(
μ,
λ,
generations,
-BinaryTournament(μ),
-SinglePointCrossover(),
+# BinaryTournament(μ),
+BinaryTournament(30),
+LineCrossover(),
GaussianPerturbation(0.1),
UnionReplacement()
)
-p = es(f, initial_population)
+p, h = es(f, initial_population)
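# Note: the EvolutionStrategy call now returns a 3-tuple (best/front, fitness, log_history),
# so `p, h = es(f, initial_population)` binds `h` to the fitness values and silently drops
# the log history; use `p, fit, h = es(f, initial_population)` if the history is needed.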