diff --git a/algo/algo.go b/algo/algo.go index 0396752..9ca26e1 100644 --- a/algo/algo.go +++ b/algo/algo.go @@ -47,7 +47,7 @@ func DoRandomSearch(wg *sync.WaitGroup) { go plotAllDims(algoStats[i], "plot", ".svg", &plotWg) } - stats.PrintStatisticTable(algoStats) + stats.SaveTable("Random Search", algoStats) plotWg.Wait() } @@ -83,7 +83,7 @@ func DoStochasticHillClimbing(wg *sync.WaitGroup) { go plotAllDims(algoStat, "plot", ".svg", &plotWg) } - stats.PrintStatisticTable(algoStats) + stats.SaveTable("Stochastic Hill Climbing", algoStats) plotWg.Wait() } diff --git a/report/table.go b/report/table.go index d673796..62c00d3 100644 --- a/report/table.go +++ b/report/table.go @@ -37,6 +37,16 @@ func SaveTableToFile(t Table) { safeName := util.SanitiseFName(t.Algo) texTableFile := GetTexDir() + "table-" + safeName + ".tex" tmplTableFile := "report/table.tmpl" + + if _, err := os.Stat(tmplTableFile); err != nil { + // TODO(me): fix this. + // this block is relevant for the unit test path, somehow the file is + // not found as defined above. + log.Println(err, `, weird test behaviour , prepending "../"`) + + tmplTableFile = "../" + tmplTableFile + } + + tmplTable := template.Must(template.ParseFiles(tmplTableFile)) f, err := os.Create(texTableFile) diff --git a/stats/table.go b/stats/table.go index 37346d8..b3ffd68 100644 --- a/stats/table.go +++ b/stats/table.go @@ -5,65 +5,65 @@ package stats import ( "fmt" - "os" + "git.dotya.ml/wanderer/math-optim/report" "gonum.org/v1/gonum/floats" "gonum.org/v1/gonum/stat" ) -// statsRow represents the header in the table. -type statsHdr struct { - Algo string - BenchFuncName string - Dimens int - Generations int - Iterations int +// getColLayout returns a string slice of Latex table column alignment +// settings. +func getColLayout() []string { + return []string{"c", "c", "c", "c", "c"} } -// statsRow represents a single row in the table. 
-type statsRow struct { - Min float64 - Max float64 - Mean float64 - Median float64 - StdDev float64 +// getColNames returns names of table columns, i.e. statistical features we are +// interested in. +func getColNames() []string { + // the first column describes specific iteration settings and is therefore + // dynamically set, hence not present here nor mentioned in + // `getColLayout()`. + return []string{"min", "max", "mean", "median", "stddev"} } -// PrintStatisticTable prints to console computed statistics for current algo. -func PrintStatisticTable(algoStats [][]Stats) { - fmt.Fprintln(os.Stderr, "printing statistic table data (min, max, mean, median, stddev)") +// SaveTable sifts through computed values, organises data in table-like +// structure as defined in the `report` pkg and passes it on to be fed to a +// tmpl, result of which is then saved in a `.tex` file (filename based on the +// algo name string). +func SaveTable(algo string, algoStats [][]Stats) { + table := report.NewTable() + + table.Algo = algo + table.Header = getColNames() + table.ColLayout = getColLayout() for _, singleFunc := range algoStats { - fmt.Fprintf(os.Stderr, "%+v\n", statsSingleFunc(singleFunc)) + // append/merge(...) if necessary. + table.Rows = append(table.Rows, parseSingleBenchStats(singleFunc)...) } + + report.SaveTableToFile(*table) } -// statsSingleFunc computes statistics out of results of a single bench func -// and returns the statistic table as a []interface{} because it contains -// multiple table headers and rows. perhaps a table struct could be returned in -// the future. -func statsSingleFunc(singleFuncStats []Stats) []interface{} { - var ( - // hdr is the table header as determined based on the data being dealt with. - hdr statsHdr - // row contains the data of the statistic properties being tracked. - row statsRow - ) +// parseSingleBenchStats processes results of a particular bench and constructs +// statistics. 
+func parseSingleBenchStats(benchStats []Stats) []report.Row { + rows := make([]report.Row, 0) - // out contains the constructed table and is returned at the end of - // this func. - out := make([]interface{}, 0) - - for _, s := range singleFuncStats { + for _, s := range benchStats { for _, dim := range s.BenchFuncStats { - hdr = makeTableHdr( - s.Algo, - s.BenchFuncStats[0].BenchName, + row := report.NewRow() + + row.Title = "D=" + fmt.Sprint(s.Dimens) + ", f=" + dim.BenchName + + ", G=" + fmt.Sprint(s.Generations) + + ", I=" + fmt.Sprint(s.Iterations) + + row.Title = makeRowTitle( + dim.BenchName, s.Dimens, s.Generations, s.Iterations, ) - out = append(out, "\n", hdr, "\n") // collect the best. var best []float64 @@ -74,39 +74,31 @@ func statsSingleFunc(singleFuncStats []Stats) []interface{} { best = append(best, iter.Results[last]) } - row.Min = floats.Min(best) - row.Max = floats.Max(best) - row.Mean = stat.Mean(best, nil) - row.Median = stat.Mean(best, nil) - row.StdDev = stat.StdDev(best, nil) + row.Values = statsFromBest(best) - out = append(out, row) + rows = append(rows, *row) } } - out = append(out, "\n") - - return out + return rows } -// makeTableHdr fills the table header with passed information. -// TODO(me): add checks to assert only valid inputs are passed in. -func makeTableHdr( - algo, benchFuncName string, - dimens, generations, iterations int, -) statsHdr { - hdr := newStatsHdr() - - hdr.Algo = algo - hdr.BenchFuncName = benchFuncName - hdr.Dimens = dimens - hdr.Generations = generations - hdr.Iterations = iterations - - return *hdr +func makeRowTitle(bench string, dimens, generations, iterations int) string { + return "D=" + fmt.Sprint(dimens) + ", f=" + bench + + ", G=" + fmt.Sprint(generations) + + ", I=" + fmt.Sprint(iterations) } -// newStatsHdr returns a pointer to a newly created statsHdr instance. 
-func newStatsHdr() *statsHdr { - return &statsHdr{} +// statsFromBest computes the actual statistics upon the slice of best results, +// returns a slice of float64s. +func statsFromBest(best []float64) []float64 { + s := make([]float64, len(getColNames())) + + s[0] = floats.Min(best) + s[1] = floats.Max(best) + s[2] = stat.Mean(best, nil) + s[3] = stat.Mean(best, nil) + s[4] = stat.StdDev(best, nil) + + return s } diff --git a/stats/table_test.go b/stats/table_test.go index 319424e..fe8693a 100644 --- a/stats/table_test.go +++ b/stats/table_test.go @@ -5,31 +5,20 @@ package stats import ( "testing" + + "git.dotya.ml/wanderer/math-optim/report" ) -func TestMakeTableHdr(t *testing.T) { - want := statsHdr{ - Algo: "Random Search", - BenchFuncName: "Schwefel", - Dimens: 10, - Generations: 3000, - Iterations: 30, - } - got := makeTableHdr("Random Search", "Schwefel", 10, 3000, 30) +func TestMakeRowTitle(t *testing.T) { + want := `D=10, f=Schwefel, G=3000, I=30` + got := makeRowTitle("Schwefel", 10, 3000, 30) if want != got { - t.Errorf("wrong hdr, want: %+v, got: %+v", want, got) + t.Errorf("wrong row title, want: %+q, got: %+q", want, got) } } -func TestNewStatsHdr(t *testing.T) { - if want := newStatsHdr(); want == nil { - t.Error("could not create hdr") - } -} - -func TestStatsSingleFunc(t *testing.T) { - testHdr := makeTableHdr("Aladeen Search", "De Jong 5th", 5, 3, 4) +func TestParseBenchStats(t *testing.T) { benchFuncStats := []FuncStats{ { BenchName: "De Jong 5th", @@ -50,22 +39,26 @@ func TestStatsSingleFunc(t *testing.T) { Generations: 3, }, } - want := []interface{}{ - "\n", - testHdr, - "\n", - statsRow{1336.3261400058473, 2169.7378893600176, 1737.765688971335, 1737.765688971335, 342.37072192259393}, - "\n", - } - got := statsSingleFunc(testStats) - if len(want) != len(got) { - t.Errorf("outputs are of different sizes, want: %d, got: %d", len(want), len(got)) + wantResults := []float64{1336.3261400058473, 2169.7378893600176, 1737.765688971335, 
1737.765688971335, 342.37072192259393} + want := report.Row{ + Title: "D=5, f=De Jong 5th, G=3, I=4", + Values: wantResults, + } + // expecting a single row so we're accessing it directly. + got := parseSingleBenchStats(testStats)[0] + + if len(want.Values) != len(got.Values) { + t.Errorf("outputs are of different sizes, want: %d, got: %d", len(want.Values), len(got.Values)) } - for i := range want { - if want[i] != got[i] { - t.Errorf("outputs don't match, want: %+v, got: %+v", want, got) + if want.Title != got.Title { + t.Errorf("output titles differ, want: %q, got: %q", want.Title, got.Title) + } + + for i := range want.Values { + if want.Values[i] != got.Values[i] { + t.Errorf("outputs don't match,\n\twant: %+v,\n\tgot: %+v", want, got) } } }