fix(benchmarks): fix three parsing bugs in parse_results.go and bench_test.go

- parse_results.go: fix metric extraction order — Go outputs custom metrics
  (p50_µs, p95_µs, p99_µs, req/s) BEFORE B/op and allocs/op on the benchmark
  line. The old positional regex expected B/op first, so p50/p95/p99 were
  always empty in latency_report.csv. It was replaced with a separate regexp
  for each field, so field order on the line no longer matters.

- parse_results.go: remove p95_latency_ms column from throughput_report.csv —
  parallel sweep files only emit ns/op and req/s, never p95 data. The column
  was structurally always empty.

- bench_test.go: remove fmt.Printf from BenchmarkBAPCaller_RPS — the debug
  print raced with Go's own benchmark output line, garbling the result into
  'BenchmarkRPS-N  RPS: N over Ns', which the framework could not parse; as a
  result, req/s never appeared in the structured output. b.ReportMetric
  alone is sufficient.
This commit is contained in:
Mayuresh
2026-04-09 17:01:13 +05:30
parent 1a6acfc260
commit 23e39722d2
2 changed files with 46 additions and 51 deletions

View File

@@ -1,7 +1,6 @@
package e2e_bench_test
import (
"fmt"
"net/http"
"sort"
"testing"
@@ -162,9 +161,7 @@ func BenchmarkBAPCaller_RPS(b *testing.B) {
elapsed := time.Since(start).Seconds()
if elapsed > 0 {
rps := float64(count) / elapsed
b.ReportMetric(rps, "req/s")
fmt.Printf(" RPS: %.0f over %.1fs\n", rps, elapsed)
b.ReportMetric(float64(count)/elapsed, "req/s")
}
}