nanobench: Add per-run time reporting.
This patch adds per-benchmark-iteration times to our JSON output. Given that we
already collect these statistics, giving them to the user would be nice.
No unit test is provided, since `rgrep -i json tests` yielded nothing. Happy to
add one if someone wants.
BUG=None.
TEST=nanobench now writes per-run timings in the output JSON.
Change-Id: I910f1d97fd3e0ee69fc8e78e011e67b9c866f18d
Reviewed-on: https://skia-review.googlesource.com/5617
Reviewed-by: Mike Klein <mtklein@chromium.org>
Commit-Queue: Ravi Mistry <rmistry@google.com>
diff --git a/bench/ResultsWriter.h b/bench/ResultsWriter.h
index dc50ace..1d6dc3e 100644
--- a/bench/ResultsWriter.h
+++ b/bench/ResultsWriter.h
@@ -47,6 +47,9 @@
// Record a single test metric.
virtual void metric(const char name[], double ms) {}
+ // Record a list of test metrics.
+ virtual void metrics(const char name[], const SkTArray<double>& array) {}
+
// Flush to storage now please.
virtual void flush() {}
};
@@ -114,6 +117,16 @@
SkASSERT(fConfig);
(*fConfig)[name] = ms;
}
+ void metrics(const char name[], const SkTArray<double>& array) override {
+ SkASSERT(fConfig);
+ Json::Value value = Json::Value(Json::arrayValue);
+ value.resize(array.count());
+ for (int i = 0; i < array.count(); i++) {
+ // Don't care about nan-ness.
+ value[i] = array[i];
+ }
+ (*fConfig)[name] = std::move(value);
+ }
// Flush to storage now please.
void flush() override {
diff --git a/bench/nanobench.cpp b/bench/nanobench.cpp
index 5abecd6..9e641b9 100644
--- a/bench/nanobench.cpp
+++ b/bench/nanobench.cpp
@@ -1288,6 +1288,7 @@
benchStream.fillCurrentOptions(log.get());
target->fillOptions(log.get());
log->metric("min_ms", stats.min);
+ log->metrics("samples", samples);
#if SK_SUPPORT_GPU
if (gpuStatsDump) {
// dump to json, only SKPBench currently returns valid keys / values