math-optim/algo/ga/de.go

// Copyright 2023 wanderer <a_mirre at utb dot cz>
// SPDX-License-Identifier: GPL-3.0-or-later

package ga

import (
	"sort"
	"time"

	"golang.org/x/exp/rand"

	"git.dotya.ml/wanderer/math-optim/bench"
	"git.dotya.ml/wanderer/math-optim/stats"

	"gonum.org/v1/gonum/stat/distuv"
)

// DE is a holder for the settings of an instance of a Differential Evolution
// (DE) algorithm.
type DE struct {
	// Generations denotes the number of generations the population evolves
	// for. The special value -1 disables limiting the number of generations.
	Generations int
	// BenchMinIters is the number of iterations the bench function will be
	// re-run (for statistical purposes).
	BenchMinIters int
	// Dimensions to solve the problem for.
	Dimensions []int
	// F is the differential weight (mutation/weighting factor).
	F float64
	// CR is the crossover probability constant.
	CR float64
	// MutationStrategy selects the mutation strategy, i.e. the variant of the
	// DE algorithm (0..17), see mutationStrategies.go for more details.
	MutationStrategy int
	// NP is the initial population size.
	NP int
	// BenchName is the name of the problem to optimise.
	BenchName string
	// ch is a channel for writing back computed results.
	ch chan []stats.Stats
	// chAlgoMeans is a channel for writing back algo means.
	chAlgoMeans chan *stats.AlgoBenchMean
	// initialised denotes the initialisation state of the struct.
	initialised bool
}

const (
	// minNPDE is the minimum size of the initial population for DE.
	minNPDE = 4
	// fMinDE is the minimum allowed value of the differential weight.
	fMinDE = 0.5
	// fMaxDE is the maximum allowed value of the differential weight.
	fMaxDE = 2.0
	// crMinDE is the minimum allowed value of the crossover probability constant.
	crMinDE = 0.2
	// crMaxDE is the maximum allowed value of the crossover probability constant.
	crMaxDE = 0.9
)

// dELogger is a "custom" DE logger.
var dELogger = newLogger(" *** δ DE:")

// Init initialises the DE algorithm and performs sanity checks on the inputs.
func (d *DE) Init(
	generations, benchMinIters, mutStrategy, np int,
	f, cr float64,
	dimensions []int,
	bench string,
	ch chan []stats.Stats,
	chAlgoMeans chan *stats.AlgoBenchMean,
) {
	if d == nil {
		dELogger.Fatalln("DE is nil, NewDE() needs to be called first, exiting...")
	}

	if generations == -1 {
		dELogger.Println("Generations is '-1', disabling generation limits...")
	}

	// check input parameters.
	switch {
	case generations == 0:
		dELogger.Fatalln("Generations cannot be 0, got", generations)
	case benchMinIters < 1:
		dELogger.Fatalln("Minimum bench iterations cannot be less than 1, got:", benchMinIters)
	case mutStrategy < 0 || mutStrategy > 17:
		dELogger.Fatalln("Mutation strategy needs to be from the interval <0; 17>, got", mutStrategy)
	case np < minNPDE:
		dELogger.Fatalf("NP cannot be less than %d, got: %d\n", minNPDE, np)
	case f < fMinDE || f > fMaxDE:
		dELogger.Fatalf("F needs to be from the interval <%f; %f>, got: %f\n", fMinDE, fMaxDE, f)
	case cr < crMinDE || cr > crMaxDE:
		dELogger.Fatalf("CR needs to be from the interval <%f; %f>, got: %f\n", crMinDE, crMaxDE, cr)
	case len(dimensions) == 0:
		dELogger.Fatalf("Dimensions cannot be empty, got: %+v\n", dimensions)
	case bench == "":
		dELogger.Fatalln("Bench cannot be unset, got:", bench)
	}

	d.Generations = generations
	d.BenchMinIters = benchMinIters
	d.MutationStrategy = mutStrategy
	d.NP = np
	d.F = f
	d.CR = cr
	d.Dimensions = dimensions
	d.BenchName = bench
	d.ch = ch
	d.chAlgoMeans = chAlgoMeans
	d.initialised = true
}

// InitAndRun initialises the DE algorithm, performs sanity checks on the
// inputs and calls the Run method.
func (d *DE) InitAndRun(
	generations, benchMinIters, mutStrategy, np int,
	f, cr float64,
	dimensions []int,
	bench string,
	ch chan []stats.Stats,
	chAlgoMeans chan *stats.AlgoBenchMean,
) {
	if d == nil {
		dELogger.Fatalln("DE is nil, NewDE() needs to be called first, exiting...")
	}

	d.Init(
		generations, benchMinIters, mutStrategy, np,
		f, cr,
		dimensions,
		bench,
		ch,
		chAlgoMeans,
	)

	d.Run()
}

// Run runs the pure (non-adaptive) Differential Evolution algorithm on the
// configured benchmark.
func (d *DE) Run() {
	if d == nil {
		dELogger.Fatalln("DE is nil, NewDE() needs to be called first, exiting...")
	}

	if !d.initialised {
		dELogger.Fatalln("DE needs to be initialised before calling Run(), exiting...")
	}

	var dEStats []stats.Stats

	dEMeans := &stats.AlgoBenchMean{
		Algo: "DE",
		BenchMeans: make([]stats.BenchMean, 0, len(d.Dimensions)),
	}

	// run evolve for all dimensions.
	for _, dim := range d.Dimensions {
		maxFES := bench.GetGAMaxFES(dim)
		fesPerIter := maxFES / d.NP

		dEStatDimX := &stats.Stats{
			Algo: "DE",
			Dimens: dim,
			Iterations: d.BenchMinIters,
			Generations: maxFES,
			NP: d.NP,
			F: d.F,
			CR: d.CR,
		}
		funcStats := &stats.FuncStats{BenchName: d.BenchName}
		dimXMean := &stats.BenchMean{
			Bench: d.BenchName,
			Dimens: dim,
			Iterations: d.BenchMinIters,
			Generations: maxFES,
			Neighbours: -1,
		}
		benchFuncParams := bench.FunctionParams[d.BenchName]
		uniDist := distuv.Uniform{
			Min: benchFuncParams.Min(),
			Max: benchFuncParams.Max(),
		}

		funcStats.BenchResults = make([]stats.BenchRound, d.BenchMinIters)

		// rand.Seed(uint64(time.Now().UnixNano()))
		// create and seed a source of pseudo-randomness.
		src := rand.NewSource(uint64(rand.Int63()))

		// track execution duration.
		start := time.Now()

		for iter := 0; iter < d.BenchMinIters; iter++ {
			dELogger.Printf("run: %d, bench: %s, %dD, started at %s",
				iter, d.BenchName, dim, start,
			)

			funcStats.BenchResults[iter].Iteration = iter

			uniDist.Src = src

			// create a population with known params.
			pop := newPopulation(d.BenchName, d.NP, dim)
			// set population seed.
			pop.Seed = uint64(time.Now().UnixNano())
			// initialise the population.
			pop.Init()

			var bestResult float64

			// the core.
			for i := 0; i < fesPerIter; i++ {
				r := d.evaluate(pop)

				// distinguish the first iteration from any of the subsequent ones.
				switch i {
				case 0:
					bestResult = r
				default:
					// call evolve, where the actual DE runs.
					d.evolve(pop, &uniDist)

					// take measurements of the current population fitness.
					r = d.evaluate(pop)

					// save if better.
					if r < bestResult {
						bestResult = r
					}
				}

				funcStats.BenchResults[iter].Results = append(
					funcStats.BenchResults[iter].Results,
					bestResult,
				)

				// this block makes sure func evaluations are counted properly
				// so that the resulting plots are directly comparable, i.e.
				// append the winning (current best) value NP-1 times (the
				// first best is already saved at this point) to represent the
				// fact that while the other population individuals are being
				// evaluated and compared to the current best value in the
				// background, the current best value itself is kept around
				// and symbolically saved as the best of the Generation.
				for x := 0; x < d.NP-1; x++ {
					funcStats.BenchResults[iter].Results = append(
						funcStats.BenchResults[iter].Results,
						bestResult,
					)
				}
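
				// Illustrative arithmetic (the concrete numbers are assumed,
				// not taken from the code above): with, say, maxFES = 10000
				// and NP = 50, fesPerIter works out to 200; each pass of this
				// loop records 1 + (NP-1) = 50 values, so a single bench
				// iteration ends up with exactly fesPerIter*NP = maxFES =
				// 10000 results, which is the length stats.GetMeanVals is
				// later called with.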
			}
		}

		elapsed := time.Since(start)

		dELogger.Printf("completed: bench: %s, %dD, computing took %s\n",
			d.BenchName, dim, elapsed,
		)

		// get mean vals.
		dimXMean.MeanVals = stats.GetMeanVals(funcStats.BenchResults, maxFES)
		funcStats.MeanVals = dimXMean.MeanVals

		dEStatDimX.BenchFuncStats = append(
			dEStatDimX.BenchFuncStats, *funcStats,
		)

		dEStats = append(dEStats, *dEStatDimX)
		dEMeans.BenchMeans = append(dEMeans.BenchMeans, *dimXMean)
	}

	sort.Sort(dEMeans)

	d.chAlgoMeans <- dEMeans
	d.ch <- dEStats
}

// evaluate returns the fitness of the best individual in the current
// population.
func (d *DE) evaluate(pop *Population) float64 {
	f := bench.Functions[pop.Problem]

	bestIndividual := pop.Population[pop.GetBestIdx()].CurX
	bestSolution := f(bestIndividual)

	return bestSolution
}

// evolve evolves a population by running the DE (Differential Evolution)
// algorithm on the passed population until termination conditions are met.
//
//nolint:gocognit
func (d *DE) evolve(pop *Population, uniDist *distuv.Uniform) {
	popCount := len(pop.Population)

	for i, currentIndividual := range pop.Population {
		idcs := make([]int, 0, popCount-1)

		// gather indices of all individuals other than the current one.
		for k := 0; k < popCount; k++ {
			if k != i {
				idcs = append(idcs, k)
			}
		}
		// randomly choose 3 distinct indices out of idcs; since idcs already
		// excludes i, the chosen donors are guaranteed to differ from the
		// current individual.
		selectedIdcs := make([]int, 0, 3)

		for len(selectedIdcs) < 3 {
			candidate := idcs[rand.Intn(len(idcs))]
			duplicate := false

			for _, s := range selectedIdcs {
				if s == candidate {
					duplicate = true
					break
				}
			}

			if !duplicate {
				selectedIdcs = append(selectedIdcs, candidate)
			}
		}

		// selected contains the population individuals chosen as donors for
		// rand/1/bin.
		selected := make([]PopulationIndividual, 0, 3)

		for _, idx := range selectedIdcs {
			selected = append(selected, pop.Population[idx])
		}

		mutant := make([]float64, pop.Dimen)

		// mutate.
		for k := 0; k < pop.Dimen; k++ {
			// mutant = a + F * (b - c)
			mutant[k] = selected[0].CurX[k] + (d.F * (selected[1].CurX[k] - selected[2].CurX[k]))

			// clip values to <0;1>.
			switch {
			case mutant[k] < 0:
				mutant[k] = 0
			case mutant[k] > 1:
				mutant[k] = 1
			}
		}
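
		// Worked example of the mutation step (numbers purely illustrative):
		// with F = 0.8 and donor components a_k = 0.3, b_k = 0.7, c_k = 0.2,
		// the mutant component is 0.3 + 0.8*(0.7-0.2) = 0.7, which already
		// lies in <0;1> and therefore needs no clipping.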

		crossPoints := make([]bool, pop.Dimen)

		// prepare crossover points (binomial crossover).
		for k := 0; k < pop.Dimen; k++ {
			crossPoints[k] = uniDist.Rand() < d.CR
		}

		trial := make([]float64, pop.Dimen)

		// recombine using the crossover points: take the mutant component
		// where a crossover point is set, keep the parent's component
		// otherwise.
		for k := 0; k < pop.Dimen; k++ {
			if crossPoints[k] {
				trial[k] = mutant[k]
			} else {
				trial[k] = currentIndividual.CurX[k]
			}
		}

		f := bench.Functions[pop.Problem]

		currentFitness := f(currentIndividual.CurX)
		trialFitness := f(trial)

		// replace the parent if the trial vector is better.
		if trialFitness < currentFitness {
			pop.Population[i].CurX = trial
		}
	}
}

// NewDE returns a pointer to a new, uninitialised DE instance.
func NewDE() *DE {
	return &DE{}
}
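
// Usage sketch (illustrative only, not part of the original file): the
// parameter values and the bench name below are assumptions picked for
// demonstration, and the caller is expected to drain both channels, e.g.
// from a reporting/plotting routine:
//
//	ch := make(chan []stats.Stats, 1)
//	chAlgoMeans := make(chan *stats.AlgoBenchMean, 1)
//
//	de := NewDE()
//	go de.InitAndRun(
//		-1, 30, 0, 50, // generations (unlimited), bench iterations, mutation strategy, NP
//		0.8, 0.9, // F, CR
//		[]int{10, 30}, // dimensions
//		"Schwefel", // bench name, assumed to be a key of bench.Functions
//		ch, chAlgoMeans,
//	)
//
//	algoMeans := <-chAlgoMeans
//	allStats := <-ch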