math-optim/algo/ga/jde.go

// Copyright 2023 wanderer <a_mirre at utb dot cz>
// SPDX-License-Identifier: GPL-3.0-or-later
package ga

import (
	"sort"
	"time"

	"golang.org/x/exp/rand"

	"git.dotya.ml/wanderer/math-optim/bench/cec2020"
	"git.dotya.ml/wanderer/math-optim/stats"

	"gonum.org/v1/gonum/stat/distuv"
)

// JDE is a holder for the settings of an instance of a self-adapting
// differential evolution (jDE) algorithm.
type JDE struct {
	// Generations denotes the number of generations the population evolves
	// for. The special value -1 disables limiting the number of generations.
	Generations int
	// BenchMinIters is the number of iterations the bench function will be
	// re-run (for statistical purposes).
	BenchMinIters int
	// Dimensions to solve the problem for.
	Dimensions []int
	// F is the initial value of the differential weight (mutation/weighting factor).
	F float64
	// CR is the initial value of the crossover probability constant.
	CR float64
	// rngF is a random number generator for the differential weight
	// (mutation/weighting factor).
	rngF distuv.Uniform
	// rngCR is a random number generator for the crossover probability
	// constant adapted over time.
	rngCR distuv.Uniform
	// MutationStrategy selects the mutation strategy, i.e. the variant of the
	// jDE algorithm (0..17), see mutationStrategies.go for more details.
	MutationStrategy int
	// AdptScheme is the parameter self-adaptation scheme (0..1).
	AdptScheme int
	// NP is the initial population size.
	NP int
	// BenchName is the name of the problem to optimise.
	BenchName string
	// ch is a channel for writing back computed results.
	ch chan []stats.Stats
	// chAlgoMeans is a channel for writing back algo means.
	chAlgoMeans chan *stats.AlgoBenchMean
	// initialised denotes the initialisation state of the struct.
	initialised bool
}

const (
	// jDEMinNP is the minimum size of the initial population for jDE
	// (PaGMO specifies 8 for jDE).
	jDEMinNP = 4
	// fMin is the minimum allowed value of the differential weight.
	fMin = 0.36
	// fMax is the maximum allowed value of the differential weight.
	fMax = 1.0
	// crMin is the minimum allowed value of the crossover probability constant.
	crMin = 0.0
	// crMax is the maximum allowed value of the crossover probability constant.
	crMax = 1.0
	// tau1 is used in parameter self-adaptation.
	tau1 = 0.1
	// tau2 is used in parameter self-adaptation.
	tau2 = 0.1
	// fl is used in parameter self-adaptation.
	fl = 0.1
	// fu is used in parameter self-adaptation.
	fu = 0.9
)
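
// Note: tau1, tau2, fl and fu correspond to tau_1, tau_2, F_l and F_u of the
// jDE parameter self-adaptation rule (Brest et al.); see adaptParameters
// below for how they are used.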

// jDELogger is a "custom" jDE logger.
var jDELogger = newLogger(" *** δ jDE:")

// Init initialises the jDE algorithm and performs sanity checks on the inputs.
func (j *JDE) Init(generations, benchMinIters, mutStrategy, adptScheme, np int, f, cr float64, dimensions []int, bench string, ch chan []stats.Stats, chAlgoMeans chan *stats.AlgoBenchMean) {
	if j == nil {
		jDELogger.Fatalln("jDE is nil, NewjDE() needs to be called first. exiting...")
	}

	// check input parameters.
	switch {
	case generations == 0:
		jDELogger.Fatalln("Generations cannot be 0, got", generations)
	case generations == -1:
		jDELogger.Println("Generations is '-1', disabling generation limits...")
	case benchMinIters < 1:
		jDELogger.Fatalln("Minimum bench iterations cannot be less than 1, got:", benchMinIters)
	case mutStrategy < 0 || mutStrategy > 17:
		jDELogger.Fatalln("Mutation strategy needs to be from the interval <0; 17>, got", mutStrategy)
	case adptScheme < 0 || adptScheme > 1:
		jDELogger.Fatalln("Parameter self-adaptation scheme needs to be from the interval <0; 1>, got", adptScheme)
	case np < jDEMinNP:
		jDELogger.Fatalf("NP cannot be less than %d, got: %d.\n", jDEMinNP, np)
	case f < fMin || f > fMax:
		jDELogger.Fatalf("F needs to be from the interval <%f; %f>, got: %f.\n", fMin, fMax, f)
	case cr < crMin || cr > crMax:
		jDELogger.Fatalf("CR needs to be from the interval <%f; %f>, got: %f.\n", crMin, crMax, cr)
	case len(dimensions) == 0:
		jDELogger.Fatalf("Dimensions cannot be empty, got: %+v\n", dimensions)
	case bench == "":
		jDELogger.Fatalln("Bench cannot be empty, got:", bench)
	}

	j.Generations = generations
	j.BenchMinIters = benchMinIters
	j.MutationStrategy = mutStrategy
	j.AdptScheme = adptScheme
	j.NP = np
	j.F = f
	j.CR = cr

	rngsrc := rand.NewSource(uint64(rand.Int63()))

	j.rngF = distuv.Uniform{Min: fMin, Max: fMax, Src: rngsrc}
	j.rngCR = distuv.Uniform{Min: crMin, Max: crMax, Src: rngsrc}

	j.Dimensions = dimensions
	j.BenchName = bench
	j.ch = ch
	j.chAlgoMeans = chAlgoMeans
	j.initialised = true

	gaLogger.Printf("jDE init done for bench %s", j.BenchName)
}

// InitAndRun initialises the jDE algorithm, performs sanity checks on the
// inputs and calls the Run method.
func (j *JDE) InitAndRun(generations, benchMinIters, mutStrategy, adptScheme, np int, f, cr float64, dimensions []int, bench string, ch chan []stats.Stats, chAlgoMeans chan *stats.AlgoBenchMean) {
	if j == nil {
		jDELogger.Fatalln("jDE is nil, NewjDE() needs to be called first. exiting...")
	}

	j.Init(generations, benchMinIters, mutStrategy, adptScheme, np, f, cr, dimensions, bench, ch, chAlgoMeans)

	j.Run()
}

// Run runs the self-adapting differential evolution algorithm.
func (j *JDE) Run() {
	if j == nil {
		jDELogger.Fatalln("jDE is nil, NewjDE() needs to be called first. exiting...")
	}

	if !j.initialised {
		jDELogger.Fatalln("jDE needs to be initialised before calling Run(), exiting...")
	}

	var jDEStats []stats.Stats

	jDEMeans := &stats.AlgoBenchMean{
		Algo:       "jDE",
		BenchMeans: make([]stats.BenchMean, 0, len(j.Dimensions)),
	}

	// run evolve for all dimensions.
	for _, dim := range j.Dimensions {
		maxFES := cec2020.GetMaxFES(dim)
		if maxFES == -1 {
			jDELogger.Fatalf("could not get maxFES for current dim (%d), bailing", dim)
		}

		// each generation evaluates NP individuals, so the evaluation budget
		// allows for maxFES/NP generations per iteration.
		fesPerIter := maxFES / j.NP

		jDEStatDimX := &stats.Stats{
			Algo:        "jDE",
			Dimens:      dim,
			Iterations:  j.BenchMinIters,
			Generations: maxFES,
			NP:          j.NP,
			F:           j.F,
			CR:          j.CR,
		}
		funcStats := &stats.FuncStats{BenchName: j.BenchName}
		dimXMean := &stats.BenchMean{
			Bench:       j.BenchName,
			Dimens:      dim,
			Iterations:  j.BenchMinIters,
			Generations: maxFES,
			Neighbours:  -1,
		}
		uniDist := distuv.Uniform{
			Min: cec2020.SearchRange.Min(),
			Max: cec2020.SearchRange.Max(),
		}

		jDELogger.Printf("running bench \"%s\" for %dD, maxFES: %d\n",
			j.BenchName, dim, maxFES,
		)

		funcStats.BenchResults = make([]stats.BenchRound, j.BenchMinIters)

		// rand.Seed(uint64(time.Now().UnixNano()))
		// create and seed a source of pseudo-randomness.
		src := rand.NewSource(uint64(rand.Int63()))

		// track execution duration.
		start := time.Now()

		for iter := 0; iter < j.BenchMinIters; iter++ {
			jDELogger.Printf("run: %d, bench: %s, %dD, started at %s",
				iter, j.BenchName, dim, start,
			)

			funcStats.BenchResults[iter].Iteration = iter

			uniDist.Src = src
			// uniDist.Src = rand.NewSource(uint64(rand.Int63()))

			// create a population with known params.
			pop := newPopulation(j.BenchName, j.NP, dim)
			// set population seed.
			pop.Seed = uint64(time.Now().UnixNano())
			// initialise the population.
			pop.Init()

			var bestResult float64

			// the core.
			for i := 0; i < fesPerIter; i++ {
				r := j.evaluate(pop)

				// distinguish the first from any of the subsequent iterations.
				switch i {
				case 0:
					bestResult = r
				default:
					// call evolve, where jDE runs.
					j.evolve(pop, &uniDist)

					// take measurements of the current population fitness.
					r = j.evaluate(pop)

					// save if better.
					if r < bestResult {
						bestResult = r
					}
				}

				funcStats.BenchResults[iter].Results = append(
					funcStats.BenchResults[iter].Results,
					bestResult,
				)

				// this block makes sure func evaluations are counted properly,
				// so that plots remain comparable across algorithms: append
				// the winning (current best) value another NP-1 times (the
				// first one has already been saved above) to represent the
				// fact that while the other population individuals are being
				// evaluated and compared against the current best in the
				// background, the current best value itself is kept around
				// and symbolically saved as the best of the generation. each
				// generation therefore contributes NP results, i.e. roughly
				// maxFES results per iteration in total.
				for x := 0; x < j.NP-1; x++ {
					funcStats.BenchResults[iter].Results = append(
						funcStats.BenchResults[iter].Results,
						bestResult,
					)
				}
			}
		}

		elapsed := time.Since(start)

		jDELogger.Printf("completed: bench: %s, %dD, computing took %s\n",
			j.BenchName, dim, elapsed,
		)

		// get mean vals.
		dimXMean.MeanVals = stats.GetMeanVals(funcStats.BenchResults, maxFES)
		funcStats.MeanVals = dimXMean.MeanVals

		jDEStatDimX.BenchFuncStats = append(
			jDEStatDimX.BenchFuncStats, *funcStats,
		)

		jDEStats = append(jDEStats, *jDEStatDimX)
		jDEMeans.BenchMeans = append(jDEMeans.BenchMeans, *dimXMean)
	}

	sort.Sort(jDEMeans)

	j.chAlgoMeans <- jDEMeans
	j.ch <- jDEStats
}

// evaluate evaluates the fitness of the current population and returns the
// objective value of its best individual.
func (j *JDE) evaluate(pop *Population) float64 {
	f := pop.ProblemFunc
	bestIndividual := pop.Population[pop.GetBestIdx()].CurX
	bestSolution := f(bestIndividual)

	return bestSolution
}

// evolve evolves a population by running the jDE (self-adapting Differential
// Evolution) algorithm on the passed population until termination conditions
// are met.
// nolint: gocognit
func (j *JDE) evolve(pop *Population, uniDist *distuv.Uniform) {
	f := pop.ProblemFunc

	genChampIdx := pop.GetBestIdx()
	genChamp := ChampionIndividual{
		X: pop.Population[genChampIdx].CurX,
	}
	genChampFitness := f(genChamp.X)

	for i, currentIndividual := range pop.Population {
		currentFitness := f(currentIndividual.CurX)

		// promote the generation champion to the population champion, if better.
		if genChampFitness < f(pop.Champion.X) {
			pop.Champion.X = genChamp.X
		}

		donors := pop.SelectDonors(i)

		mutant := mutate(j.MutationStrategy, currentIndividual, genChamp, pop, donors...)
		if mutant == nil {
			jDELogger.Fatalln("Somehow you managed to pass in an out-of-bounds mutation strategy, don't know what to do, exiting...")
		}

		// prepare crossover points (binomial crossover).
		crossPoints := make([]bool, pop.Dimen)

		for k := 0; k < pop.Dimen; k++ {
			crossPoints[k] = uniDist.Rand() < pop.BestCR
		}
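
		// e.g. for pop.Dimen = 4 and crossPoints = [true, false, true, false],
		// the trial vector built below becomes
		// [cur[0], mutant[1], cur[2], mutant[3]] (cur = currentIndividual.CurX).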
		trial := make([]float64, pop.Dimen)

		// recombine using crossover points.
		for k := 0; k < pop.Dimen; k++ {
			if crossPoints[k] {
				trial[k] = currentIndividual.CurX[k]
			} else {
				trial[k] = mutant[k]
			}
		}

		trialFitness := f(trial)

		// replace if better.
		if trialFitness < currentFitness {
			pop.Population[i].CurX = trial
		}
	}

	// adapt parameters.
	j.adaptParameters(pop)
}

// adaptParameters adapts the parameters F and CR based on
// https://labraj.feri.um.si/images/0/05/CEC09_slides_Brest.pdf.
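//
// For reference, the canonical jDE self-adaptation rule (Brest et al.) draws
// rand_1..rand_4 uniformly from [0, 1] and, per generation G, sets
//
//	F_{G+1}  = F_l + rand_1*F_u   if rand_2 < tau_1, otherwise F_G
//	CR_{G+1} = rand_3             if rand_4 < tau_2, otherwise CR_G
//
// Scheme 0 below implements this shape using the instance's rngF/rngCR
// generators; scheme 1 instead perturbs the current best F and CR values.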
func (j *JDE) adaptParameters(p *Population) {
	var nuF float64
	var nuCR float64

	switch j.AdptScheme {
	case 0:
		if rand2 := j.getRandF(); rand2 < tau1 {
			nuF = fl + (j.getRandF() * fu)
		} else {
			nuF = p.f[len(p.f)-1]
		}

		if rand4 := j.getRandCR(); rand4 < tau2 {
			nuCR = j.getRandCR()
		} else {
			nuCR = p.cr[len(p.cr)-1]
		}
	case 1:
		nuF = p.BestF + j.getRandF()*0.5
		nuCR = p.BestCR + j.getRandCR()*0.5
	}

	// sort out F.
	p.f = append(p.f, nuF)
	p.CurF = nuF

	// sort out CR.
	p.cr = append(p.cr, nuCR)
	p.CurCR = nuCR

	// update best, if improved.
	switch {
	case nuF < p.BestF:
		p.BestF = nuF
	case nuCR < p.BestCR:
		p.BestCR = nuCR
	}
}

// getRandF returns a random value of F for parameter self-adaptation.
func (j *JDE) getRandF() float64 {
	return j.rngF.Rand()
}

// getRandCR returns a random value of CR for parameter self-adaptation.
func (j *JDE) getRandCR() float64 {
	return j.rngCR.Rand()
}

// NewjDE returns a pointer to a new, uninitialised jDE instance.
func NewjDE() *JDE {
	return &JDE{}
}
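
// Example usage (a minimal sketch; the parameter values, the bench name and
// the channel handling below are illustrative assumptions, not prescribed
// defaults):
//
//	ch := make(chan []stats.Stats, 1)             // buffered, so Run's sends
//	chMeans := make(chan *stats.AlgoBenchMean, 1) // don't block without a reader
//
//	jde := NewjDE()
//	jde.InitAndRun(
//		-1,         // generations: -1 disables the generation limit
//		10,         // benchMinIters
//		0,          // mutation strategy (see mutationStrategies.go)
//		0,          // parameter self-adaptation scheme
//		50,         // NP
//		0.5, 0.9,   // F and CR, within <0.36; 1.0> and <0.0; 1.0> respectively
//		[]int{10},  // dimensions
//		"<bench>",  // placeholder: a bench name known to the package is assumed
//		ch, chMeans,
//	)
//
//	jDEMeans := <-chMeans // Run sends the algo means first...
//	jDEStats := <-ch      // ...and the per-dimension stats second.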