go(stats): rework table saving, structure
All checks were successful
continuous-integration/drone/push Build is passing
All checks were successful
continuous-integration/drone/push Build is passing
* create tables as defined in the report pkg. * add new funcs to calculate stats and prepare the necessary table structs. * rework existing `stats` tests * handle a weird test behaviour when tmpl at `report/table.tmpl` is not found when testing a `stats` func.
This commit is contained in:
parent
2697b296b5
commit
621f56f5e3
@ -47,7 +47,7 @@ func DoRandomSearch(wg *sync.WaitGroup) {
|
|||||||
go plotAllDims(algoStats[i], "plot", ".svg", &plotWg)
|
go plotAllDims(algoStats[i], "plot", ".svg", &plotWg)
|
||||||
}
|
}
|
||||||
|
|
||||||
stats.PrintStatisticTable(algoStats)
|
stats.SaveTable("Random Search", algoStats)
|
||||||
|
|
||||||
plotWg.Wait()
|
plotWg.Wait()
|
||||||
}
|
}
|
||||||
@ -83,7 +83,7 @@ func DoStochasticHillClimbing(wg *sync.WaitGroup) {
|
|||||||
go plotAllDims(algoStat, "plot", ".svg", &plotWg)
|
go plotAllDims(algoStat, "plot", ".svg", &plotWg)
|
||||||
}
|
}
|
||||||
|
|
||||||
stats.PrintStatisticTable(algoStats)
|
stats.SaveTable("Stochastic Hill CLimbing", algoStats)
|
||||||
|
|
||||||
plotWg.Wait()
|
plotWg.Wait()
|
||||||
}
|
}
|
||||||
|
@ -37,6 +37,16 @@ func SaveTableToFile(t Table) {
|
|||||||
safeName := util.SanitiseFName(t.Algo)
|
safeName := util.SanitiseFName(t.Algo)
|
||||||
texTableFile := GetTexDir() + "table-" + safeName + ".tex"
|
texTableFile := GetTexDir() + "table-" + safeName + ".tex"
|
||||||
tmplTableFile := "report/table.tmpl"
|
tmplTableFile := "report/table.tmpl"
|
||||||
|
|
||||||
|
if _, err := os.Stat(tmplTableFile); err != nil {
|
||||||
|
// TODO(me): fix this.
|
||||||
|
// this block is relevant for the unit test path, somehow the file is
|
||||||
|
// not found as defined above.
|
||||||
|
log.Println(err, `, weird test behaviour , prepending "../"`)
|
||||||
|
|
||||||
|
tmplTableFile = "../" + tmplTableFile
|
||||||
|
}
|
||||||
|
|
||||||
tmplTable := template.Must(template.ParseFiles(tmplTableFile))
|
tmplTable := template.Must(template.ParseFiles(tmplTableFile))
|
||||||
|
|
||||||
f, err := os.Create(texTableFile)
|
f, err := os.Create(texTableFile)
|
||||||
|
124
stats/table.go
124
stats/table.go
@ -5,65 +5,65 @@ package stats
|
|||||||
|
|
||||||
import (
	"fmt"
	"sort"

	"git.dotya.ml/wanderer/math-optim/report"
	"gonum.org/v1/gonum/floats"
	"gonum.org/v1/gonum/stat"
)
|
|
||||||
// statsRow represents the header in the table.
|
// getColLayout returns a string slice of Latex table column alignment
|
||||||
type statsHdr struct {
|
// settings.
|
||||||
Algo string
|
func getColLayout() []string {
|
||||||
BenchFuncName string
|
return []string{"c", "c", "c", "c", "c"}
|
||||||
Dimens int
|
|
||||||
Generations int
|
|
||||||
Iterations int
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// statsRow represents a single row in the table.
|
// getColNames returns names of table columns, i.e. statistical features we are
|
||||||
type statsRow struct {
|
// interested in.
|
||||||
Min float64
|
func getColNames() []string {
|
||||||
Max float64
|
// the first column describes specific iteration settings and is therefore
|
||||||
Mean float64
|
// dynamically set, hence not present here nor mentioned in
|
||||||
Median float64
|
// `getColLayout()`.
|
||||||
StdDev float64
|
return []string{"min", "max", "mean", "median", "stddev"}
|
||||||
}
|
}
|
||||||
|
|
||||||
// PrintStatisticTable prints to console computed statistics for current algo.
|
// SaveTable sifts through computed values, organises data in table-like
|
||||||
func PrintStatisticTable(algoStats [][]Stats) {
|
// structure as defined in the `report` pkg and passes it on to be fed to a
|
||||||
fmt.Fprintln(os.Stderr, "printing statistic table data (min, max, mean, median, stddev)")
|
// tmpl, result of which is then saved in a `.tex` file (filename based on the
|
||||||
|
// algo name string).
|
||||||
|
func SaveTable(algo string, algoStats [][]Stats) {
|
||||||
|
table := report.NewTable()
|
||||||
|
|
||||||
|
table.Algo = algo
|
||||||
|
table.Header = getColNames()
|
||||||
|
table.ColLayout = getColLayout()
|
||||||
|
|
||||||
for _, singleFunc := range algoStats {
|
for _, singleFunc := range algoStats {
|
||||||
fmt.Fprintf(os.Stderr, "%+v\n", statsSingleFunc(singleFunc))
|
// append/merge(...) if necessary.
|
||||||
|
table.Rows = append(table.Rows, parseSingleBenchStats(singleFunc)...)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
report.SaveTableToFile(*table)
|
||||||
}
|
}
|
||||||
|
|
||||||
// statsSingleFunc computes statistics out of results of a single bench func
|
// parseSingleBenchStats processes results of a particular bench and constructs
|
||||||
// and returns the statistic table as a []interface{} because it contains
|
// statistics.
|
||||||
// multiple table headers and rows. perhaps a table struct could be returned in
|
func parseSingleBenchStats(benchStats []Stats) []report.Row {
|
||||||
// the future.
|
rows := make([]report.Row, 0)
|
||||||
func statsSingleFunc(singleFuncStats []Stats) []interface{} {
|
|
||||||
var (
|
|
||||||
// hdr is the table header as determined based on the data being dealt with.
|
|
||||||
hdr statsHdr
|
|
||||||
// row contains the data of the statistic properties being tracked.
|
|
||||||
row statsRow
|
|
||||||
)
|
|
||||||
|
|
||||||
// out contains the constructed table and is returned at the end of
|
for _, s := range benchStats {
|
||||||
// this func.
|
|
||||||
out := make([]interface{}, 0)
|
|
||||||
|
|
||||||
for _, s := range singleFuncStats {
|
|
||||||
for _, dim := range s.BenchFuncStats {
|
for _, dim := range s.BenchFuncStats {
|
||||||
hdr = makeTableHdr(
|
row := report.NewRow()
|
||||||
s.Algo,
|
|
||||||
s.BenchFuncStats[0].BenchName,
|
row.Title = "D=" + fmt.Sprint(s.Dimens) + ", f=" + dim.BenchName +
|
||||||
|
", G=" + fmt.Sprint(s.Generations) +
|
||||||
|
", I=" + fmt.Sprint(s.Iterations)
|
||||||
|
|
||||||
|
row.Title = makeRowTitle(
|
||||||
|
dim.BenchName,
|
||||||
s.Dimens,
|
s.Dimens,
|
||||||
s.Generations,
|
s.Generations,
|
||||||
s.Iterations,
|
s.Iterations,
|
||||||
)
|
)
|
||||||
out = append(out, "\n", hdr, "\n")
|
|
||||||
|
|
||||||
// collect the best.
|
// collect the best.
|
||||||
var best []float64
|
var best []float64
|
||||||
@ -74,39 +74,31 @@ func statsSingleFunc(singleFuncStats []Stats) []interface{} {
|
|||||||
best = append(best, iter.Results[last])
|
best = append(best, iter.Results[last])
|
||||||
}
|
}
|
||||||
|
|
||||||
row.Min = floats.Min(best)
|
row.Values = statsFromBest(best)
|
||||||
row.Max = floats.Max(best)
|
|
||||||
row.Mean = stat.Mean(best, nil)
|
|
||||||
row.Median = stat.Mean(best, nil)
|
|
||||||
row.StdDev = stat.StdDev(best, nil)
|
|
||||||
|
|
||||||
out = append(out, row)
|
rows = append(rows, *row)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
out = append(out, "\n")
|
return rows
|
||||||
|
|
||||||
return out
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// makeTableHdr fills the table header with passed information.
|
func makeRowTitle(bench string, dimens, generations, iterations int) string {
|
||||||
// TODO(me): add checks to assert only valid inputs are passed in.
|
return "D=" + fmt.Sprint(dimens) + ", f=" + bench +
|
||||||
func makeTableHdr(
|
", G=" + fmt.Sprint(generations) +
|
||||||
algo, benchFuncName string,
|
", I=" + fmt.Sprint(iterations)
|
||||||
dimens, generations, iterations int,
|
|
||||||
) statsHdr {
|
|
||||||
hdr := newStatsHdr()
|
|
||||||
|
|
||||||
hdr.Algo = algo
|
|
||||||
hdr.BenchFuncName = benchFuncName
|
|
||||||
hdr.Dimens = dimens
|
|
||||||
hdr.Generations = generations
|
|
||||||
hdr.Iterations = iterations
|
|
||||||
|
|
||||||
return *hdr
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// newStatsHdr returns a pointer to a newly created statsHdr instance.
|
// statsFromBest computes the actual statistics upon the slice of best results,
|
||||||
func newStatsHdr() *statsHdr {
|
// returns a slice of float64s.
|
||||||
return &statsHdr{}
|
func statsFromBest(best []float64) []float64 {
|
||||||
|
s := make([]float64, len(getColNames()))
|
||||||
|
|
||||||
|
s[0] = floats.Min(best)
|
||||||
|
s[1] = floats.Max(best)
|
||||||
|
s[2] = stat.Mean(best, nil)
|
||||||
|
s[3] = stat.Mean(best, nil)
|
||||||
|
s[4] = stat.StdDev(best, nil)
|
||||||
|
|
||||||
|
return s
|
||||||
}
|
}
|
||||||
|
@ -5,31 +5,20 @@ package stats
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
"git.dotya.ml/wanderer/math-optim/report"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestMakeTableHdr(t *testing.T) {
|
func TestMakeRowTitle(t *testing.T) {
|
||||||
want := statsHdr{
|
want := `D=10, f=Schwefel, G=3000, I=30`
|
||||||
Algo: "Random Search",
|
got := makeRowTitle("Schwefel", 10, 3000, 30)
|
||||||
BenchFuncName: "Schwefel",
|
|
||||||
Dimens: 10,
|
|
||||||
Generations: 3000,
|
|
||||||
Iterations: 30,
|
|
||||||
}
|
|
||||||
got := makeTableHdr("Random Search", "Schwefel", 10, 3000, 30)
|
|
||||||
|
|
||||||
if want != got {
|
if want != got {
|
||||||
t.Errorf("wrong hdr, want: %+v, got: %+v", want, got)
|
t.Errorf("wrong row title, want: %+q, got: %+q", want, got)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestNewStatsHdr(t *testing.T) {
|
func TestParseBenchStats(t *testing.T) {
|
||||||
if want := newStatsHdr(); want == nil {
|
|
||||||
t.Error("could not create hdr")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestStatsSingleFunc(t *testing.T) {
|
|
||||||
testHdr := makeTableHdr("Aladeen Search", "De Jong 5th", 5, 3, 4)
|
|
||||||
benchFuncStats := []FuncStats{
|
benchFuncStats := []FuncStats{
|
||||||
{
|
{
|
||||||
BenchName: "De Jong 5th",
|
BenchName: "De Jong 5th",
|
||||||
@ -50,22 +39,26 @@ func TestStatsSingleFunc(t *testing.T) {
|
|||||||
Generations: 3,
|
Generations: 3,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
want := []interface{}{
|
|
||||||
"\n",
|
|
||||||
testHdr,
|
|
||||||
"\n",
|
|
||||||
statsRow{1336.3261400058473, 2169.7378893600176, 1737.765688971335, 1737.765688971335, 342.37072192259393},
|
|
||||||
"\n",
|
|
||||||
}
|
|
||||||
got := statsSingleFunc(testStats)
|
|
||||||
|
|
||||||
if len(want) != len(got) {
|
wantResults := []float64{1336.3261400058473, 2169.7378893600176, 1737.765688971335, 1737.765688971335, 342.37072192259393}
|
||||||
t.Errorf("outputs are of different sizes, want: %d, got: %d", len(want), len(got))
|
want := report.Row{
|
||||||
|
Title: "D=5, f=De Jong 5th, G=3, I=4",
|
||||||
|
Values: wantResults,
|
||||||
|
}
|
||||||
|
// expecting a singlerow so we're accessing it directly.
|
||||||
|
got := parseSingleBenchStats(testStats)[0]
|
||||||
|
|
||||||
|
if len(want.Values) != len(got.Values) {
|
||||||
|
t.Errorf("outputs are of different sizes, want: %d, got: %d", len(want.Values), len(got.Values))
|
||||||
}
|
}
|
||||||
|
|
||||||
for i := range want {
|
if want.Title != got.Title {
|
||||||
if want[i] != got[i] {
|
t.Errorf("output titles differ, want: %q, got: %q", want.Title, got.Title)
|
||||||
t.Errorf("outputs don't match, want: %+v, got: %+v", want, got)
|
}
|
||||||
|
|
||||||
|
for i := range want.Values {
|
||||||
|
if want.Values[i] != got.Values[i] {
|
||||||
|
t.Errorf("outputs don't match,\n\twant: %+v,\n\tgot: %+v", want, got)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
Loading…
Reference in New Issue
Block a user