From 3d83683b8befda1c6dbd8d471e9284b8adea9423 Mon Sep 17 00:00:00 2001
From: leo
Date: Sat, 11 Feb 2023 20:04:02 +0100
Subject: [PATCH] add ga (Genetic Algorithms) package

---
 algo/ga/doc.go        |   5 +
 algo/ga/jde.go        | 427 ++++++++++++++++++++++++++++++++++++++++++
 algo/ga/log.go        |  17 ++
 algo/ga/mutation.go   |  86 +++++++++
 algo/ga/population.go | 249 ++++++++++++++++++++++++
 5 files changed, 784 insertions(+)
 create mode 100644 algo/ga/doc.go
 create mode 100644 algo/ga/jde.go
 create mode 100644 algo/ga/log.go
 create mode 100644 algo/ga/mutation.go
 create mode 100644 algo/ga/population.go

diff --git a/algo/ga/doc.go b/algo/ga/doc.go
new file mode 100644
index 0000000..3a11f51
--- /dev/null
+++ b/algo/ga/doc.go
@@ -0,0 +1,5 @@
+// Copyright 2023 wanderer
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+// Package ga implements Genetic Algorithms.
+package ga
diff --git a/algo/ga/jde.go b/algo/ga/jde.go
new file mode 100644
index 0000000..9e846ca
--- /dev/null
+++ b/algo/ga/jde.go
@@ -0,0 +1,427 @@
+// Copyright 2023 wanderer
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package ga
+
+import (
+	"sort"
+	"time"
+
+	"golang.org/x/exp/rand"
+
+	"git.dotya.ml/wanderer/math-optim/bench/cec2020"
+	"git.dotya.ml/wanderer/math-optim/stats"
+	"gonum.org/v1/gonum/stat/distuv"
+)
+
+// JDE is a holder for the settings of an instance of the self-adapting
+// differential evolution (jDE) algorithm.
+type JDE struct {
+	// Generations denotes the number of generations the population evolves
+	// for. The special value -1 disables limiting the number of generations.
+	Generations int
+	// BenchMinIters is the number of iterations the bench function will be re-run (for statistical purposes).
+	BenchMinIters int
+	// Dimensions to solve the problem for.
+	Dimensions []int
+	// F is the initial value of the differential weight (mutation/weighting factor).
+	F float64
+	// CR is the initial value of the crossover probability constant.
+	CR float64
+	// rngF is a random number generator for the differential weight (mutation/weighting factor).
+	rngF distuv.Uniform
+	// rngCR is a random number generator for the crossover probability constant adapted over time.
+	rngCR distuv.Uniform
+	// MutationStrategy selects the mutation strategy, i.e. the variant of the
+	// jDE algorithm (0..17), see mutation.go for more details.
+	MutationStrategy int
+	// AdptScheme is the parameter self-adaptation scheme (0..1).
+	AdptScheme int
+	// NP is the initial population size.
+	NP int
+	// BenchName is the name of the problem to optimise.
+	BenchName string
+	// ch is a channel for writing back computed results.
+	ch chan []stats.Stats
+	// chAlgoMeans is a channel for writing back algo means.
+	chAlgoMeans chan *stats.AlgoBenchMean
+	// initialised denotes the initialisation state of the struct.
+	initialised bool
+}
+
+const (
+	// jDEMinNP is the minimum size of the initial population for jDE
+	// (for jDE, PaGMO specifies 8).
+	jDEMinNP = 4
+	// fMin is the minimum allowed value of the differential weight.
+	fMin = 0.36
+	// fMax is the maximum allowed value of the differential weight.
+	fMax = 1.0
+	// crMin is the minimum allowed value of the crossover probability constant.
+	crMin = 0.0
+	// crMax is the maximum allowed value of the crossover probability constant.
+	crMax = 1.0
+
+	// tau1 is used in parameter self-adaptation.
+	tau1 = 0.1
+	// tau2 is used in parameter self-adaptation.
+	tau2 = 0.1
+	// fl is used in parameter self-adaptation.
+	fl = 0.1
+	// fu is used in parameter self-adaptation.
+	fu = 0.9
+)
+
+// jDELogger is a "custom" jDE logger.
+var jDELogger = newLogger(" *** δ jDE:")
+
+// Init initialises the jDE algorithm and performs sanity checks on the inputs.
+func (j *JDE) Init(generations, benchMinIters, mutStrategy, adptScheme, np int, f, cr float64, dimensions []int, bench string, ch chan []stats.Stats, chAlgoMeans chan *stats.AlgoBenchMean) {
+	if j == nil {
+		jDELogger.Fatalln("jDE is nil, NewjDE() needs to be called first, exiting...")
+	}
+
+	// check input parameters.
+	switch {
+	case generations == 0:
+		jDELogger.Fatalln("Generations cannot be 0, got", generations)
+
+	case generations == -1:
+		jDELogger.Println("Generations is '-1', disabling generation limits...")
+
+	case benchMinIters < 1:
+		jDELogger.Fatalln("Minimum bench iterations cannot be less than 1, got:", benchMinIters)
+
+	case mutStrategy < 0 || mutStrategy > 17:
+		jDELogger.Fatalln("Mutation strategy needs to be from the interval <0; 17>, got", mutStrategy)
+
+	case adptScheme < 0 || adptScheme > 1:
+		jDELogger.Fatalln("Parameter self-adaptation scheme needs to be from the interval <0; 1>, got", adptScheme)
+
+	case np < jDEMinNP:
+		jDELogger.Fatalf("NP cannot be less than %d, got: %d.\n", jDEMinNP, np)
+
+	case f < fMin || f > fMax:
+		jDELogger.Fatalf("F needs to be from the interval <%f; %f>, got: %f.\n", fMin, fMax, f)
+
+	case cr < crMin || cr > crMax:
+		jDELogger.Fatalf("CR needs to be from the interval <%f; %f>, got: %f.\n", crMin, crMax, cr)
+
+	case len(dimensions) == 0:
+		jDELogger.Fatalf("Dimensions cannot be empty, got: %+v\n", dimensions)
+
+	case bench == "":
+		jDELogger.Fatalln("Bench cannot be empty, got:", bench)
+	}
+
+	j.Generations = generations
+	j.BenchMinIters = benchMinIters
+	j.MutationStrategy = mutStrategy
+	j.AdptScheme = adptScheme
+	j.NP = np
+	j.F = f
+	j.CR = cr
+
+	rngsrc := rand.NewSource(uint64(rand.Int63()))
+
+	j.rngF = distuv.Uniform{Min: fMin, Max: fMax, Src: rngsrc}
+	j.rngCR = distuv.Uniform{Min: crMin, Max: crMax, Src: rngsrc}
+
+	j.Dimensions = dimensions
+	j.BenchName = bench
+	j.ch = ch
+	j.chAlgoMeans = chAlgoMeans
+
+	j.initialised = true
+
+	gaLogger.Printf("jDE init done for bench %s", j.BenchName)
+}
+
+// InitAndRun initialises the jDE algorithm, performs sanity checks on the
+// inputs and calls the Run method.
+func (j *JDE) InitAndRun(generations, benchMinIters, mutStrategy, adptScheme, np int, f, cr float64, dimensions []int, bench string, ch chan []stats.Stats, chAlgoMeans chan *stats.AlgoBenchMean) {
+	if j == nil {
+		jDELogger.Fatalln("jDE is nil, NewjDE() needs to be called first, exiting...")
+	}
+
+	j.Init(generations, benchMinIters, mutStrategy, adptScheme, np, f, cr, dimensions, bench, ch, chAlgoMeans)
+
+	j.Run()
+}
+
+// Run runs the self-adapting differential evolution algorithm.
+func (j *JDE) Run() {
+	if j == nil {
+		jDELogger.Fatalln("jDE is nil, NewjDE() needs to be called first, exiting...")
+	}
+
+	if !j.initialised {
+		jDELogger.Fatalln("jDE needs to be initialised before calling Run(), exiting...")
+	}
+
+	var jDEStats []stats.Stats
+
+	jDEMeans := &stats.AlgoBenchMean{
+		Algo:       "jDE",
+		BenchMeans: make([]stats.BenchMean, 0, len(j.Dimensions)),
+	}
+
+	// run evolve for all dimensions.
+	for _, dim := range j.Dimensions {
+		maxFES := cec2020.GetMaxFES(dim)
+		if maxFES == -1 {
+			jDELogger.Fatalf("could not get maxFES for the current dim (%d), bailing", dim)
+		}
+
+		fesPerIter := maxFES / j.NP
+		jDEStatDimX := &stats.Stats{
+			Algo:        "jDE",
+			Dimens:      dim,
+			Iterations:  j.BenchMinIters,
+			Generations: maxFES,
+			NP:          j.NP,
+			F:           j.F,
+			CR:          j.CR,
+		}
+		funcStats := &stats.FuncStats{BenchName: j.BenchName}
+		dimXMean := &stats.BenchMean{
+			Bench:       j.BenchName,
+			Dimens:      dim,
+			Iterations:  j.BenchMinIters,
+			Generations: maxFES,
+			Neighbours:  -1,
+		}
+		uniDist := distuv.Uniform{
+			Min: cec2020.SearchRange.Min(),
+			Max: cec2020.SearchRange.Max(),
+		}
+
+		jDELogger.Printf("running bench \"%s\" for %dD, maxFES: %d\n",
+			j.BenchName, dim, maxFES,
+		)
+
+		funcStats.BenchResults = make([]stats.BenchRound, j.BenchMinIters)
+
+		// create and seed a source of pseudo-randomness.
+		src := rand.NewSource(uint64(rand.Int63()))
+
+		// track execution duration.
+		start := time.Now()
+
+		for iter := 0; iter < j.BenchMinIters; iter++ {
+			jDELogger.Printf("run: %d, bench: %s, %dD, started at %s",
+				iter, j.BenchName, dim, start,
+			)
+
+			funcStats.BenchResults[iter].Iteration = iter
+
+			uniDist.Src = src
+
+			// create a population with known params.
+			pop := newPopulation(j.BenchName, j.NP, dim)
+
+			// set population seed.
+			pop.Seed = uint64(time.Now().UnixNano())
+			// seed the population's control parameters with the configured
+			// initial F and CR values.
+			pop.CurF = j.F
+			pop.CurCR = j.CR
+			pop.BestF = j.F
+			pop.BestCR = j.CR
+			// initialise the population.
+			pop.Init()
+
+			var bestResult float64
+
+			// the core.
+			for i := 0; i < fesPerIter; i++ {
+				r := j.evaluate(pop)
+
+				// distinguish the first iteration from the subsequent ones.
+				switch i {
+				case 0:
+					bestResult = r
+
+				default:
+					// call evolve, where jDE runs.
+					j.evolve(pop, &uniDist)
+
+					// take measurements of the current population fitness.
+					r = j.evaluate(pop)
+
+					// save it if it is better.
+					if r < bestResult {
+						bestResult = r
+					}
+				}
+
+				funcStats.BenchResults[iter].Results = append(
+					funcStats.BenchResults[iter].Results,
+					bestResult,
+				)
+				// this block makes sure func evaluations are counted properly
+				// so that plots remain correctly comparable: append the
+				// winning (current best) value NP-1 more times (the first
+				// best is already saved at this point) to represent the fact
+				// that while the other population individuals are evaluated
+				// and compared against the current best in the background,
+				// the current best value itself is kept around and
+				// symbolically saved as the best of the generation.
+				for x := 0; x < j.NP-1; x++ {
+					funcStats.BenchResults[iter].Results = append(
+						funcStats.BenchResults[iter].Results,
+						bestResult,
+					)
+				}
+			}
+		}
+
+		elapsed := time.Since(start)
+
+		jDELogger.Printf("completed: bench: %s, %dD, computing took %s\n",
+			j.BenchName, dim, elapsed,
+		)
+
+		// get mean vals.
+		dimXMean.MeanVals = stats.GetMeanVals(funcStats.BenchResults, maxFES)
+		funcStats.MeanVals = dimXMean.MeanVals
+
+		jDEStatDimX.BenchFuncStats = append(
+			jDEStatDimX.BenchFuncStats, *funcStats,
+		)
+
+		jDEStats = append(jDEStats, *jDEStatDimX)
+
+		jDEMeans.BenchMeans = append(jDEMeans.BenchMeans, *dimXMean)
+	}
+
+	sort.Sort(jDEMeans)
+
+	j.chAlgoMeans <- jDEMeans
+	j.ch <- jDEStats
+}
+
+// evaluate evaluates the fitness of the current population.
+func (j *JDE) evaluate(pop *Population) float64 {
+	f := pop.ProblemFunc
+
+	bestIndividual := pop.Population[pop.GetBestIdx()].CurX
+	bestSolution := f(bestIndividual)
+
+	return bestSolution
+}
+
+// evolve evolves a population by running the jDE (self-adapting Differential
+// Evolution) algorithm on the passed population until termination conditions
+// are met.
+// nolint: gocognit
+func (j *JDE) evolve(pop *Population, uniDist *distuv.Uniform) {
+	f := pop.ProblemFunc
+	genChampIdx := pop.GetBestIdx()
+	genChamp := ChampionIndividual{
+		X: pop.Population[genChampIdx].CurX,
+	}
+	genChampFitness := f(genChamp.X)
+
+	// update the population champion if the current generation's champion is better.
+	if genChampFitness < f(pop.Champion.X) {
+		pop.Champion.X = genChamp.X
+	}
+
+	for i, currentIndividual := range pop.Population {
+		currentFitness := f(currentIndividual.CurX)
+
+		donors := pop.SelectDonors(i)
+
+		mutant := mutate(j.MutationStrategy, currentIndividual, genChamp, pop, donors...)
+
+		if mutant == nil {
+			jDELogger.Fatalln("an out-of-range mutation strategy was passed in, exiting...")
+		}
+
+		crossPoints := make([]bool, pop.Dimen)
+
+		// prepare crossover points (binomial crossover): take the mutant's
+		// component with probability CR, otherwise keep the parent's.
+		for k := 0; k < pop.Dimen; k++ {
+			crossPoints[k] = uniDist.Rand() < pop.BestCR
+		}
+
+		trial := make([]float64, pop.Dimen)
+
+		// recombine using crossover points.
+		for k := 0; k < pop.Dimen; k++ {
+			if crossPoints[k] {
+				trial[k] = mutant[k]
+			} else {
+				trial[k] = currentIndividual.CurX[k]
+			}
+		}
+
+		trialFitness := f(trial)
+
+		// replace the current individual if the trial vector is better.
+		if trialFitness < currentFitness {
+			pop.Population[i].CurX = trial
+		}
+	}
+
+	// adapt parameters.
+	j.adaptParameters(pop)
+}
+
+// adaptParameters adapts the parameters F and CR based on
+// https://labraj.feri.um.si/images/0/05/CEC09_slides_Brest.pdf.
+func (j *JDE) adaptParameters(p *Population) {
+	var nuF float64
+
+	var nuCR float64
+
+	switch {
+	case j.AdptScheme == 0:
+		// the tau checks draw from U(0, 1), per the jDE self-adaptation rule.
+		if rand.Float64() < tau1 {
+			nuF = fl + (j.getRandF() * fu)
+		} else {
+			nuF = p.f[len(p.f)-1]
+		}
+
+		if rand.Float64() < tau2 {
+			nuCR = j.getRandCR()
+		} else {
+			nuCR = p.cr[len(p.cr)-1]
+		}
+
+	case j.AdptScheme == 1:
+		nuF = p.BestF + j.getRandF()*0.5
+		nuCR = p.BestCR + j.getRandCR()*0.5
+	}
+
+	// sort out F.
+	p.f = append(p.f, nuF)
+	p.CurF = nuF
+	// sort out CR.
+	p.cr = append(p.cr, nuCR)
+	p.CurCR = nuCR
+
+	// update the best-so-far values, if improved.
+	if nuF < p.BestF {
+		p.BestF = nuF
+	}
+
+	if nuCR < p.BestCR {
+		p.BestCR = nuCR
+	}
+}
+
+// getRandF returns a random value of F for parameter self-adaptation.
+func (j *JDE) getRandF() float64 {
+	return j.rngF.Rand()
+}
+
+// getRandCR returns a random value of CR for parameter self-adaptation.
+func (j *JDE) getRandCR() float64 {
+	return j.rngCR.Rand()
+}
+
+// NewjDE returns a pointer to a new, uninitialised jDE instance.
+func NewjDE() *JDE {
+	return &JDE{}
+}
diff --git a/algo/ga/log.go b/algo/ga/log.go
new file mode 100644
index 0000000..4be6d5d
--- /dev/null
+++ b/algo/ga/log.go
@@ -0,0 +1,17 @@
+// Copyright 2023 wanderer
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package ga
+
+import (
+	"log"
+	"os"
+)
+
+// gaLogger is a "custom" ga logger.
+var gaLogger = log.New(os.Stderr, " *** Γ ga:", log.Ldate|log.Ltime|log.Lshortfile)
+
+// newLogger can be used to get a logger with a custom prefix.
+func newLogger(prefix string) *log.Logger {
+	return log.New(os.Stderr, prefix, log.Ldate|log.Ltime|log.Lshortfile)
+}
diff --git a/algo/ga/mutation.go b/algo/ga/mutation.go
new file mode 100644
index 0000000..10611d7
--- /dev/null
+++ b/algo/ga/mutation.go
@@ -0,0 +1,86 @@
+// Copyright 2023 wanderer
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package ga
+
+// mutation strategies iota consts.
+
+const (
+	best1Exp = iota
+	rand1Exp
+	randtoBest1Exp
+)
+
+// mutations.
+
+// clipVals clips the mutant's values to the range <0; 1>.
+func clipVals(mutant []float64) []float64 {
+	for i := range mutant {
+		switch {
+		case mutant[i] < 0:
+			mutant[i] = 0
+
+		case mutant[i] > 1:
+			mutant[i] = 1
+		}
+	}
+
+	return mutant
+}
+
+// mutate performs the mutation of choice and returns the mutant.
+func mutate(mutStrategy int, currentIndividual PopulationIndividual, genChamp ChampionIndividual, pop *Population, donors ...PopulationIndividual) []float64 {
+	var mutant []float64
+
+	dim := pop.Dimen
+	curF := pop.CurF
+	champ := genChamp.X
+	curX := currentIndividual.CurX
+
+	switch mutStrategy {
+	case best1Exp:
+		mutant = mutBest1Exp(dim, curF, champ, donors...)
+
+	case rand1Exp:
+		mutant = mutRand1Exp(dim, curF, donors...)
+
+	case randtoBest1Exp:
+		mutant = mutRandtoBest1Exp(dim, curF, champ, curX, donors...)
+
+	default:
+		return nil
+	}
+
+	// return mutant with values clipped to <0; 1>.
+	return clipVals(mutant)
+}
+
+func mutBest1Exp(dim int, curF float64, champ []float64, donors ...PopulationIndividual) []float64 {
+	mutant := make([]float64, dim)
+
+	// v = best + F*(x_r1 - x_r2)
+	for i := 0; i < dim; i++ {
+		mutant[i] = champ[i] + curF*(donors[1].CurX[i]-donors[2].CurX[i])
+	}
+
+	return mutant
+}
+
+func mutRand1Exp(dim int, curF float64, donors ...PopulationIndividual) []float64 {
+	mutant := make([]float64, dim)
+
+	// v = x_r1 + F*(x_r2 - x_r3)
+	// tmp[n] = popold[r1][n] + curF*(popold[r2][n]-popold[r3][n]);
+	for i := 0; i < dim; i++ {
+		mutant[i] = donors[0].CurX[i] + curF*(donors[1].CurX[i]-donors[2].CurX[i])
+	}
+
+	return mutant
+}
+
+func mutRandtoBest1Exp(dim int, curF float64, champ, currentIndividual []float64, donors ...PopulationIndividual) []float64 {
+	mutant := make([]float64, dim)
+
+	// v = x_i + F*(best - x_i) + F*(x_r1 - x_r2)
+	for i := 0; i < dim; i++ {
+		mutant[i] = currentIndividual[i] +
+			curF*(champ[i]-currentIndividual[i]) +
+			curF*(donors[0].CurX[i]-donors[1].CurX[i])
+	}
+
+	return mutant
+}
diff --git a/algo/ga/population.go b/algo/ga/population.go
new file mode 100644
index 0000000..cd72f2a
--- /dev/null
+++ b/algo/ga/population.go
@@ -0,0 +1,249 @@
+// Copyright 2023 wanderer
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package ga
+
+import (
+	"git.dotya.ml/wanderer/math-optim/bench/cec2020"
+	"golang.org/x/exp/rand"
+
+	"gonum.org/v1/gonum/stat/distuv"
+)
+
+type (
+	// DecisionVector is a []float64 abstraction representing the decision vector.
+	DecisionVector []float64
+	// FitnessVector is a []float64 abstraction representing the fitness vector.
+	FitnessVector []float64
+	// ConstraintVector is a []float64 abstraction representing the constraint vector.
+	ConstraintVector []float64
+)
+
+// PopulationIndividual represents a single population individual.
+type PopulationIndividual struct {
+	CurX DecisionVector
+	CurV DecisionVector
+	CurC ConstraintVector
+	CurF FitnessVector
+
+	BestX DecisionVector
+	BestC ConstraintVector
+	BestF FitnessVector
+}
+
+// ChampionIndividual is a representation of the best individual currently
+// available in the population.
+type ChampionIndividual struct {
+	X DecisionVector
+	// CR float64
+	// F float64
+}
+
+// Population groups population individuals (agents) with metadata about the population.
+type Population struct {
+	// Population is a slice of population individuals.
+	Population []PopulationIndividual
+	// Champion represents the best individual of the population.
+	Champion ChampionIndividual
+	// Problem is the name of the benchmark function this population is attempting to optimise.
+	Problem string
+	// ProblemFunc is the actual function to optimise.
+	ProblemFunc func([]float64) float64
+	// Dimen is the dimensionality of the problem being optimised.
+	Dimen int
+	// Seed is the value used to (re)initialise the population.
+	Seed uint64
+
+	// f is the differential weight (mutation/weighting factor) adapted over time.
+	f []float64
+	// cr is the crossover probability constant adapted over time.
+	cr []float64
+	// BestF is the best recorded value of the differential weight F.
+	BestF float64
+	// BestCR is the best recorded value of the crossover probability constant CR.
+	BestCR float64
+	// CurF is the current value of the differential weight F.
+	CurF float64
+	// CurCR is the current value of the crossover probability constant CR.
+	CurCR float64
+}
+
+// GetIndividual returns a reference to the individual at position n.
+func (p *Population) GetIndividual(n uint) *PopulationIndividual {
+	return &p.Population[n]
+}
+
+// GetBestIdx returns the index of the best population individual.
+func (p *Population) GetBestIdx() int {
+	f := p.ProblemFunc
+
+	bestIndividual := 0
+	// start with the first one as the best one.
+	bestVal := f(p.Population[0].CurX)
+
+	for i, v := range p.Population {
+		current := f(v.CurX)
+
+		if current < bestVal {
+			bestVal = current
+			bestIndividual = i
+		}
+	}
+
+	return bestIndividual
+}
+
+// GetWorstIdx returns the index of the worst population individual.
+func (p *Population) GetWorstIdx() int {
+	f := p.ProblemFunc
+
+	worstIndividual := 0
+	// start with the first one as the worst one.
+	worstVal := f(p.Population[0].CurX)
+
+	for i, v := range p.Population {
+		current := f(v.CurX)
+
+		if current > worstVal {
+			worstVal = current
+			worstIndividual = i
+		}
+	}
+
+	return worstIndividual
+}
+
+// Init initialises all individuals to random values.
+func (p *Population) Init() {
+	uniform := distuv.Uniform{
+		Min: cec2020.SearchRange.Min(),
+		Max: cec2020.SearchRange.Max(),
+	}
+	uniform.Src = rand.NewSource(p.Seed)
+
+	// gaLogger.Printf("population initialisation - popCount: %d, seed: %d\n",
+	// 	len(p.Population), p.Seed,
+	// )
+
+	for i, v := range p.Population {
+		v.CurX = make([]float64, p.Dimen)
+
+		for j := 0; j < p.Dimen; j++ {
+			v.CurX[j] = uniform.Rand()
+		}
+
+		p.Population[i] = v
+	}
+
+	// start the control-parameter histories with the current F and CR values.
+	p.f = append(make([]float64, 0, p.Size()), p.CurF)
+	p.cr = append(make([]float64, 0, p.Size()), p.CurCR)
+
+	p.Champion = ChampionIndividual{X: p.Population[p.GetBestIdx()].CurX}
+}
+
+// Reinit reinitialises all individuals.
+func (p *Population) Reinit() {
+	p.Init()
+}
+
+// ReinitN reinitialises the individual at position n.
+func (p *Population) ReinitN(n uint) {}
+
+// Clear sets all vectors to 0.
+func (p *Population) Clear() {
+	if p.Population != nil {
+		for i := range p.Population {
+			p.Population[i].CurX = make([]float64, p.Dimen)
+			p.Population[i].CurC = make([]float64, p.Dimen)
+			p.Population[i].CurF = make([]float64, p.Dimen)
+			p.Population[i].CurV = make([]float64, p.Dimen)
+			p.Population[i].BestX = make([]float64, p.Dimen)
+			p.Population[i].BestC = make([]float64, p.Dimen)
+			p.Population[i].BestF = make([]float64, p.Dimen)
+		}
+	}
+}
+
+// SelectDonors randomly selects three mutually distinct donor individuals,
+// all of them different from the individual at currentIdx.
+func (p *Population) SelectDonors(currentIdx int) []PopulationIndividual {
+	popCount := p.Size()
+
+	idcs := make([]int, 0, popCount-1)
+
+	// gather the indices of all individuals except the current one.
+	for k := 0; k < popCount; k++ {
+		if k != currentIdx {
+			idcs = append(idcs, k)
+		}
+	}
+
+	// randomly choose 3 distinct indices out of idcs.
+	selectedIdcs := make([]int, 0, 3)
+
+	for len(selectedIdcs) < 3 {
+		candidate := idcs[rand.Intn(len(idcs))]
+
+		alreadySelected := false
+
+		for _, s := range selectedIdcs {
+			if s == candidate {
+				alreadySelected = true
+
+				break
+			}
+		}
+
+		if !alreadySelected {
+			selectedIdcs = append(selectedIdcs, candidate)
+		}
+	}
+
+	// selected contains the selected population individuals.
+	selected := make([]PopulationIndividual, 0, len(selectedIdcs))
+
+	for _, idx := range selectedIdcs {
+		selected = append(selected, p.Population[idx])
+	}
+
+	return selected
+}
+
+// MeanVelocity computes the mean current velocity of all individuals in the population.
+func (p *Population) MeanVelocity() float64 { return 0.0 }
+
+// Size returns the number of population individuals.
+func (p *Population) Size() int {
+	return len(p.Population)
+}
+
+// newPopulation returns a pointer to a new, uninitialised population.
+func newPopulation(benchProblem string, np, dimen int) *Population {
+	p := &Population{}
+
+	p.Problem = benchProblem
+	p.ProblemFunc = cec2020.Functions[benchProblem]
+	p.Dimen = dimen
+
+	// pre-allocate the population slice.
+	p.Population = make([]PopulationIndividual, np)
+
+	return p
+}
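
Usage sketch (not part of the patch): a minimal example of driving the new ga.JDE type end to end via InitAndRun. It assumes the package import path git.dotya.ml/wanderer/math-optim/algo/ga (inferred from the repository's other import paths), that "F1" is a placeholder for a function name registered in cec2020.Functions, and that the parameter values shown are purely illustrative.

package main

import (
	"fmt"

	"git.dotya.ml/wanderer/math-optim/algo/ga"
	"git.dotya.ml/wanderer/math-optim/stats"
)

func main() {
	// channels the jDE run reports its results back on, as declared in jde.go.
	ch := make(chan []stats.Stats)
	chAlgoMeans := make(chan *stats.AlgoBenchMean)

	jde := ga.NewjDE()

	// Run blocks until both results have been sent, so drive it from a
	// separate goroutine and receive in send order (means first, stats second).
	go jde.InitAndRun(
		-1,            // generations: -1 disables the generation limit (maxFES governs the run).
		10,            // benchMinIters: statistical re-runs of the bench.
		1,             // mutation strategy (1 == rand1Exp in mutation.go).
		0,             // parameter self-adaptation scheme 0.
		50,            // NP: initial population size.
		0.5,           // F: initial differential weight, within <0.36; 1.0>.
		0.9,           // CR: initial crossover probability, within <0; 1>.
		[]int{10, 20}, // dimensions to benchmark.
		"F1",          // placeholder bench name; must exist in cec2020.Functions.
		ch,
		chAlgoMeans,
	)

	means := <-chAlgoMeans
	results := <-ch

	fmt.Printf("got %d per-dimension stat sets and %d bench means\n",
		len(results), len(means.BenchMeans))
}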