Distinguish common and unique names for skiaperf.com.
It turns out we tack the size on after the fact in ResultsWriter::bench(), so the only
place getUniqueName() needs to differ from getName() is SKPBench.
BUG=skia:
R=jcgregorio@google.com, mtklein@google.com
Author: mtklein@chromium.org
Review URL: https://codereview.chromium.org/552303004
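
For context, a minimal standalone sketch of the getName()/getUniqueName() split this
patch introduces, assuming the pattern shown in the diff below. std::string stands in
for SkString and snprintf for SkString::printf so it compiles outside the Skia tree;
ScaledBench is a hypothetical stand-in for SKPBench, not code from this CL.

    #include <cstdio>
    #include <string>

    // Base class: the unique name falls back to the common name by default.
    class Benchmark {
    public:
        virtual ~Benchmark() {}
        const char* getName()       { return this->onGetName(); }
        const char* getUniqueName() { return this->onGetUniqueName(); }
    protected:
        virtual const char* onGetName() = 0;
        virtual const char* onGetUniqueName() { return this->onGetName(); }
    };

    // Hypothetical SKPBench-like subclass: the scale suffix is what keeps
    // skiaperf.com traces distinct while the common name stays plain.
    class ScaledBench : public Benchmark {
    public:
        ScaledBench(const char* name, double scale) : fName(name) {
            char buf[256];
            snprintf(buf, sizeof(buf), "%s_%.2g", name, scale);
            fUniqueName = buf;
        }
    protected:
        const char* onGetName() override       { return fName.c_str(); }
        const char* onGetUniqueName() override { return fUniqueName.c_str(); }
    private:
        std::string fName, fUniqueName;
    };

    int main() {
        ScaledBench bench("desk_gmail.skp", 0.5);
        printf("common: %s\nunique: %s\n", bench.getName(), bench.getUniqueName());
        // Prints:
        //   common: desk_gmail.skp
        //   unique: desk_gmail.skp_0.5
        return 0;
    }
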
diff --git a/bench/Benchmark.cpp b/bench/Benchmark.cpp
index 04223dc..9205ba9 100644
--- a/bench/Benchmark.cpp
+++ b/bench/Benchmark.cpp
@@ -24,6 +24,10 @@
return this->onGetName();
}
+const char* Benchmark::getUniqueName() {
+ return this->onGetUniqueName();
+}
+
SkIPoint Benchmark::getSize() {
return this->onGetSize();
}
diff --git a/bench/Benchmark.h b/bench/Benchmark.h
index 8e8eeff..508d1da 100644
--- a/bench/Benchmark.h
+++ b/bench/Benchmark.h
@@ -49,6 +49,7 @@
Benchmark();
const char* getName();
+ const char* getUniqueName();
SkIPoint getSize();
enum Backend {
@@ -98,6 +99,7 @@
virtual void setupPaint(SkPaint* paint);
virtual const char* onGetName() = 0;
+ virtual const char* onGetUniqueName() { return this->onGetName(); }
virtual void onPreDraw() {}
// Each bench should do its main work in a loop like this:
// for (int i = 0; i < loops; i++) { <work here> }
diff --git a/bench/SKPBench.cpp b/bench/SKPBench.cpp
index 308cea6..9d822d9 100644
--- a/bench/SKPBench.cpp
+++ b/bench/SKPBench.cpp
@@ -10,14 +10,19 @@
SKPBench::SKPBench(const char* name, const SkPicture* pic, const SkIRect& clip, SkScalar scale)
: fPic(SkRef(pic))
, fClip(clip)
- , fScale(scale) {
- fName.printf("%s_%.2g", name, scale);
+ , fScale(scale)
+ , fName(name) {
+ fUniqueName.printf("%s_%.2g", name, scale); // Scale makes this unique for skiaperf.com traces.
}
const char* SKPBench::onGetName() {
return fName.c_str();
}
+const char* SKPBench::onGetUniqueName() {
+ return fUniqueName.c_str();
+}
+
bool SKPBench::isSuitableFor(Backend backend) {
return backend != kNonRendering_Backend;
}
diff --git a/bench/SKPBench.h b/bench/SKPBench.h
index 9cc9192..0b0e008 100644
--- a/bench/SKPBench.h
+++ b/bench/SKPBench.h
@@ -21,6 +21,7 @@
protected:
virtual const char* onGetName() SK_OVERRIDE;
+ virtual const char* onGetUniqueName() SK_OVERRIDE;
virtual bool isSuitableFor(Backend backend) SK_OVERRIDE;
virtual void onDraw(const int loops, SkCanvas* canvas) SK_OVERRIDE;
virtual SkIPoint onGetSize() SK_OVERRIDE;
@@ -30,6 +31,7 @@
const SkIRect fClip;
const SkScalar fScale;
SkString fName;
+ SkString fUniqueName;
typedef Benchmark INHERITED;
};
diff --git a/bench/nanobench.cpp b/bench/nanobench.cpp
index 20afdeb..853492b 100644
--- a/bench/nanobench.cpp
+++ b/bench/nanobench.cpp
@@ -163,7 +163,7 @@
while (bench_plus_overhead < overhead) {
if (round++ == FLAGS_maxCalibrationAttempts) {
SkDebugf("WARNING: Can't estimate loops for %s (%s vs. %s); skipping.\n",
- bench->getName(), HUMANIZE(bench_plus_overhead), HUMANIZE(overhead));
+ bench->getUniqueName(), HUMANIZE(bench_plus_overhead), HUMANIZE(overhead));
return kFailedLoops;
}
bench_plus_overhead = time(1, bench, canvas, NULL);
@@ -592,7 +592,7 @@
BenchmarkStream benchStream;
while (Benchmark* b = benchStream.next()) {
SkAutoTDelete<Benchmark> bench(b);
- if (SkCommandLineFlags::ShouldSkip(FLAGS_match, bench->getName())) {
+ if (SkCommandLineFlags::ShouldSkip(FLAGS_match, bench->getUniqueName())) {
continue;
}
@@ -600,7 +600,7 @@
create_targets(&targets, bench.get(), configs);
if (!targets.isEmpty()) {
- log->bench(bench->getName(), bench->getSize().fX, bench->getSize().fY);
+ log->bench(bench->getUniqueName(), bench->getSize().fX, bench->getSize().fY);
bench->preDraw();
}
for (int j = 0; j < targets.count(); j++) {
@@ -617,7 +617,7 @@
if (canvas && !FLAGS_writePath.isEmpty() && FLAGS_writePath[0]) {
SkString pngFilename = SkOSPath::Join(FLAGS_writePath[0], config);
- pngFilename = SkOSPath::Join(pngFilename.c_str(), bench->getName());
+ pngFilename = SkOSPath::Join(pngFilename.c_str(), bench->getUniqueName());
pngFilename.append(".png");
write_canvas_png(canvas, pngFilename);
}
@@ -629,6 +629,7 @@
Stats stats(samples.get(), FLAGS_samples);
log->config(config);
+ log->configOption("name", bench->getName());
benchStream.fillCurrentOptions(log.get());
#if SK_SUPPORT_GPU
if (Benchmark::kGPU_Backend == targets[j]->config.backend) {
@@ -645,17 +646,17 @@
if (targets.count() == 1) {
config = ""; // Only print the config if we run the same bench on more than one.
}
- SkDebugf("%s\t%s\n", bench->getName(), config);
+ SkDebugf("%s\t%s\n", bench->getUniqueName(), config);
} else if (FLAGS_verbose) {
for (int i = 0; i < FLAGS_samples; i++) {
SkDebugf("%s ", HUMANIZE(samples[i]));
}
- SkDebugf("%s\n", bench->getName());
+ SkDebugf("%s\n", bench->getUniqueName());
} else if (FLAGS_quiet) {
if (targets.count() == 1) {
config = ""; // Only print the config if we run the same bench on more than one.
}
- SkDebugf("%s\t%s\t%s\n", HUMANIZE(stats.median), bench->getName(), config);
+ SkDebugf("%s\t%s\t%s\n", HUMANIZE(stats.median), bench->getUniqueName(), config);
} else {
const double stddev_percent = 100 * sqrt(stats.var) / stats.mean;
SkDebugf("%4dM\t%d\t%s\t%s\t%s\t%s\t%.0f%%\t%s\t%s\t%s\n"
@@ -668,7 +669,7 @@
, stddev_percent
, stats.plot.c_str()
, config
- , bench->getName()
+ , bench->getUniqueName()
);
}
}