/*
 * Copyright 2014 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include <ctype.h>

#include "Benchmark.h"
#include "CrashHandler.h"
#include "ResultsWriter.h"
#include "Stats.h"
#include "Timer.h"

#include "SkCanvas.h"
#include "SkCommandLineFlags.h"
#include "SkForceLinking.h"
#include "SkGraphics.h"
#include "SkString.h"
#include "SkSurface.h"

#if SK_SUPPORT_GPU
    #include "GrContextFactory.h"
    GrContextFactory gGrFactory;
#endif

__SK_FORCE_IMAGE_DECODER_LINKING;

#if SK_DEBUG
    DEFINE_bool(runOnce, true, "Run each benchmark just once?");
#else
    DEFINE_bool(runOnce, false, "Run each benchmark just once?");
#endif

DEFINE_int32(samples, 10, "Number of samples to measure for each bench.");
DEFINE_int32(overheadLoops, 100000, "Loops to estimate timer overhead.");
DEFINE_double(overheadGoal, 0.0001,
              "Loop until timer overhead is at most this fraction of our measurements.");
DEFINE_string(match, "", "The usual filters on file names of benchmarks to measure.");
DEFINE_bool2(quiet, q, false, "Print only bench name and median sample.");
DEFINE_bool2(verbose, v, false, "Print all samples.");
DEFINE_string(config, "nonrendering 8888 gpu", "Configs to measure. Options: "
              "565 8888 gpu nonrendering debug nullgpu msaa4 msaa16 nvprmsaa4 nvprmsaa16 angle");
DEFINE_double(gpuMs, 5, "Target bench time in milliseconds for GPU.");
DEFINE_int32(gpuFrameLag, 5, "Overestimate of maximum number of frames GPU allows to lag.");

DEFINE_bool(cpu, true, "Master switch for CPU-bound work.");
DEFINE_bool(gpu, true, "Master switch for GPU-bound work.");

DEFINE_string(outResultsFile, "", "If given, write results here as JSON.");
DEFINE_bool(resetGpuContext, true, "Reset the GrContext before running each bench.");
DEFINE_int32(maxCalibrationAttempts, 3,
             "Try up to this many times to guess loops for a bench, or skip the bench.");
DEFINE_int32(maxLoops, 1000000, "Never run a bench more times than this.");

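// Format a duration given in milliseconds with a human-friendly unit (s, ms, us/µs, or ns).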
static SkString humanize(double ms) {
    if (ms > 1e+3) return SkStringPrintf("%.3gs",  ms/1e3);
    if (ms < 1e-3) return SkStringPrintf("%.3gns", ms*1e6);
#ifdef SK_BUILD_FOR_WIN
    if (ms < 1)    return SkStringPrintf("%.3gus", ms*1e3);
#else
    if (ms < 1)    return SkStringPrintf("%.3gµs", ms*1e3);
#endif
    return SkStringPrintf("%.3gms", ms);
}
#define HUMANIZE(ms) humanize(ms).c_str()

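// Time loops iterations of bench->draw(), flushing the canvas and, when a GL context is given,
// flushing and swapping it so pending GPU work is included.  With a NULL bench this measures
// just the timer overhead.  Returns elapsed wall time in milliseconds.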
static double time(int loops, Benchmark* bench, SkCanvas* canvas, SkGLContextHelper* gl) {
    WallTimer timer;
    timer.start();
    if (bench) {
        bench->draw(loops, canvas);
    }
    if (canvas) {
        canvas->flush();
    }
#if SK_SUPPORT_GPU
    if (gl) {
        SK_GL(*gl, Flush());
        gl->swapBuffers();
    }
#endif
    timer.end();
    return timer.fWall;
}

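// Average the cost of an empty timed run over FLAGS_overheadLoops runs; cpu_bench() uses this
// to decide how many loops it takes to make timer overhead negligible.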
static double estimate_timer_overhead() {
    double overhead = 0;
    for (int i = 0; i < FLAGS_overheadLoops; i++) {
        overhead += time(1, NULL, NULL, NULL);
    }
    return overhead / FLAGS_overheadLoops;
}

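// Keep a calibrated loop count within [1, FLAGS_maxLoops], warning when it has to be clamped.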
static int clamp_loops(int loops) {
    if (loops < 1) {
        SkDebugf("ERROR: clamping loops from %d to 1.\n", loops);
        return 1;
    }
    if (loops > FLAGS_maxLoops) {
        SkDebugf("WARNING: clamping loops from %d to FLAGS_maxLoops, %d.\n", loops, FLAGS_maxLoops);
        return FLAGS_maxLoops;
    }
    return loops;
}

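// Time bench on the CPU: calibrate a loop count large enough to hide timer overhead, then fill
// samples[0..FLAGS_samples) with per-loop times.  Returns the loop count used, or 0 on failure.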
static int cpu_bench(const double overhead, Benchmark* bench, SkCanvas* canvas, double* samples) {
    // First figure out approximately how many loops of bench it takes to make overhead negligible.
    double bench_plus_overhead;
    int round = 0;
    do {
        bench_plus_overhead = time(1, bench, canvas, NULL);
        if (++round == FLAGS_maxCalibrationAttempts) {
            SkDebugf("WARNING: Can't estimate loops for %s (%s vs. %s); skipping.\n",
                     bench->getName(), HUMANIZE(bench_plus_overhead), HUMANIZE(overhead));
            return 0;
        }
    } while (bench_plus_overhead < overhead);

    // Later we'll just start and stop the timer once but loop N times.
    // We'll pick N to make timer overhead negligible:
    //
    //            overhead
    //  -------------------------  < FLAGS_overheadGoal
    //  overhead + N * Bench Time
    //
    // where bench_plus_overhead ≈ overhead + Bench Time.
    //
    // Doing some math, we get:
    //
    //  (overhead / FLAGS_overheadGoal) - overhead
    //  ------------------------------------------  < N
    //        bench_plus_overhead - overhead
    //
    // Luckily, this also works well in practice. :)
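    //
    // Illustrative numbers (not from a real run): with ~20ns of timer overhead and the default
    // 1e-4 overhead goal, a bench that takes about 1µs per loop needs N > 200000ns / 1000ns,
    // i.e. at least ~200 loops.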
    const double numer = overhead / FLAGS_overheadGoal - overhead;
    const double denom = bench_plus_overhead - overhead;
    const int loops = clamp_loops(FLAGS_runOnce ? 1 : (int)ceil(numer / denom));

    for (int i = 0; i < FLAGS_samples; i++) {
        samples[i] = time(loops, bench, canvas, NULL) / loops;
    }
    return loops;
}

#if SK_SUPPORT_GPU
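// Time bench on the GPU: calibrate a loop count that fills roughly FLAGS_gpuMs per frame, warm up
// past any frame lag, then fill samples[0..FLAGS_samples).  Returns the loop count used.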
static int gpu_bench(SkGLContextHelper* gl,
                     Benchmark* bench,
                     SkCanvas* canvas,
                     double* samples) {
    // Make sure we're done with whatever came before.
    SK_GL(*gl, Finish());

    // First, figure out how many loops it'll take to get a frame up to FLAGS_gpuMs.
    int loops = 1;
    if (!FLAGS_runOnce) {
        double elapsed = 0;
        do {
            loops *= 2;
            // If the GPU lets frames lag at all, we need to make sure we're timing
            // _this_ round, not still timing last round.  We force this by looping
            // more times than any reasonable GPU will allow frames to lag.
            for (int i = 0; i < FLAGS_gpuFrameLag; i++) {
                elapsed = time(loops, bench, canvas, gl);
            }
        } while (elapsed < FLAGS_gpuMs);

        // We've overshot at least a little.  Scale back linearly.
        loops = (int)ceil(loops * FLAGS_gpuMs / elapsed);

        // Might as well make sure we're not still timing our calibration.
        SK_GL(*gl, Finish());
    }
    loops = clamp_loops(loops);

    // Pretty much the same deal as the calibration: do some warmup to make
    // sure we're timing steady-state pipelined frames.
    for (int i = 0; i < FLAGS_gpuFrameLag; i++) {
        time(loops, bench, canvas, gl);
    }

    // Now, actually do the timing!
    for (int i = 0; i < FLAGS_samples; i++) {
        samples[i] = time(loops, bench, canvas, gl) / loops;
    }
    return loops;
}
#endif

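// Lowercase a C string into an SkString, for case-insensitive --config matching.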
static SkString to_lower(const char* str) {
    SkString lower(str);
    for (size_t i = 0; i < lower.size(); i++) {
        lower[i] = tolower(lower[i]);
    }
    return lower;
}

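// One benchmark target: a config name, the backend it runs on, and the surface (plus GL context
// when built with GPU support) to draw into.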
struct Target {
    const char* config;
    Benchmark::Backend backend;
    SkAutoTDelete<SkSurface> surface;
#if SK_SUPPORT_GPU
    SkGLContextHelper* gl;
#endif
};

// If bench is enabled for backend/config, returns a Target* for them, otherwise NULL.
static Target* is_enabled(Benchmark* bench, Benchmark::Backend backend, const char* config) {
    if (!bench->isSuitableFor(backend)) {
        return NULL;
    }

    for (int i = 0; i < FLAGS_config.count(); i++) {
        if (to_lower(FLAGS_config[i]).equals(config)) {
            Target* target = new Target;
            target->config = config;
            target->backend = backend;
            return target;
        }
    }
    return NULL;
}

// Append all targets that are suitable for bench.
static void create_targets(Benchmark* bench, SkTDArray<Target*>* targets) {
    const int w = bench->getSize().fX,
              h = bench->getSize().fY;
    const SkImageInfo _8888 = { w, h, kN32_SkColorType,     kPremul_SkAlphaType },
                       _565 = { w, h, kRGB_565_SkColorType, kOpaque_SkAlphaType };

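    // CPU_TARGET adds a Target for `config` when it's enabled, backed by `code` (an SkSurface*,
    // or NULL for the nonrendering backend).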
    #define CPU_TARGET(config, backend, code)                              \
        if (Target* t = is_enabled(bench, Benchmark::backend, #config)) {  \
            t->surface.reset(code);                                        \
            targets->push(t);                                              \
        }
    if (FLAGS_cpu) {
        CPU_TARGET(nonrendering, kNonRendering_Backend, NULL)
        CPU_TARGET(8888, kRaster_Backend, SkSurface::NewRaster(_8888))
        CPU_TARGET(565,  kRaster_Backend, SkSurface::NewRaster(_565))
    }

#if SK_SUPPORT_GPU

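    // GPU_TARGET is the GPU-backed analogue: it creates the render-target surface through
    // gGrFactory and remembers the matching GL context for flushing and swapping.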
    #define GPU_TARGET(config, ctxType, info, samples)                                            \
        if (Target* t = is_enabled(bench, Benchmark::kGPU_Backend, #config)) {                    \
            t->surface.reset(SkSurface::NewRenderTarget(gGrFactory.get(ctxType), info, samples)); \
            t->gl = gGrFactory.getGLContext(ctxType);                                             \
            targets->push(t);                                                                     \
        }
    if (FLAGS_gpu) {
        GPU_TARGET(gpu,        GrContextFactory::kNative_GLContextType, _8888,  0)
        GPU_TARGET(msaa4,      GrContextFactory::kNative_GLContextType, _8888,  4)
        GPU_TARGET(msaa16,     GrContextFactory::kNative_GLContextType, _8888, 16)
        GPU_TARGET(nvprmsaa4,  GrContextFactory::kNVPR_GLContextType,   _8888,  4)
        GPU_TARGET(nvprmsaa16, GrContextFactory::kNVPR_GLContextType,   _8888, 16)
        GPU_TARGET(debug,      GrContextFactory::kDebug_GLContextType,  _8888,  0)
        GPU_TARGET(nullgpu,    GrContextFactory::kNull_GLContextType,   _8888,  0)
    #if SK_ANGLE
        GPU_TARGET(angle,      GrContextFactory::kANGLE_GLContextType,  _8888,  0)
    #endif
    }
#endif
}

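// Record the options that are fixed for the whole run (OS and build type) in the results log.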
static void fill_static_options(ResultsWriter* log) {
#if defined(SK_BUILD_FOR_WIN32)
    log->option("system", "WIN32");
#elif defined(SK_BUILD_FOR_MAC)
    log->option("system", "MAC");
#elif defined(SK_BUILD_FOR_ANDROID)
    log->option("system", "ANDROID");
#elif defined(SK_BUILD_FOR_UNIX)
    log->option("system", "UNIX");
#else
    log->option("system", "other");
#endif
#if defined(SK_DEBUG)
    log->option("build", "DEBUG");
#else
    log->option("build", "RELEASE");
#endif
}

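// tool_main drives everything: parse flags, set up result logging, estimate timer overhead, then
// time every registered benchmark on every enabled target and report the statistics.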
int tool_main(int argc, char** argv);
int tool_main(int argc, char** argv) {
    SetupCrashHandler();
    SkAutoGraphics ag;
    SkCommandLineFlags::Parse(argc, argv);

    if (FLAGS_runOnce) {
        FLAGS_samples     = 1;
        FLAGS_gpuFrameLag = 0;
    }

    MultiResultsWriter log;
    SkAutoTDelete<JSONResultsWriter> json;
    if (!FLAGS_outResultsFile.isEmpty()) {
        json.reset(SkNEW(JSONResultsWriter(FLAGS_outResultsFile[0])));
        log.add(json.get());
    }
    CallEnd<MultiResultsWriter> ender(log);
    fill_static_options(&log);

    const double overhead = estimate_timer_overhead();
    SkDebugf("Timer overhead: %s\n", HUMANIZE(overhead));

    SkAutoTMalloc<double> samples(FLAGS_samples);

    if (FLAGS_runOnce) {
        SkDebugf("--runOnce is true; times would only be misleading so we won't print them.\n");
    } else if (FLAGS_verbose) {
        // No header.
    } else if (FLAGS_quiet) {
        SkDebugf("median\tbench\tconfig\n");
    } else {
        SkDebugf("loops\tmin\tmedian\tmean\tmax\tstddev\tsamples\tconfig\tbench\n");
    }

    for (const BenchRegistry* r = BenchRegistry::Head(); r != NULL; r = r->next()) {
        SkAutoTDelete<Benchmark> bench(r->factory()(NULL));
        if (SkCommandLineFlags::ShouldSkip(FLAGS_match, bench->getName())) {
            continue;
        }
        log.bench(bench->getName(), bench->getSize().fX, bench->getSize().fY);

        SkTDArray<Target*> targets;
        create_targets(bench.get(), &targets);

        bench->preDraw();
        for (int j = 0; j < targets.count(); j++) {
            SkCanvas* canvas = targets[j]->surface.get() ? targets[j]->surface->getCanvas() : NULL;
            const char* config = targets[j]->config;

            const int loops =
#if SK_SUPPORT_GPU
                Benchmark::kGPU_Backend == targets[j]->backend
                ? gpu_bench(targets[j]->gl, bench.get(), canvas, samples.get())
                :
#endif
                  cpu_bench(     overhead, bench.get(), canvas, samples.get());

            if (loops == 0) {
                SkDebugf("Unable to time %s\t%s (overhead %s)\n",
                         bench->getName(), config, HUMANIZE(overhead));
                continue;
            }

            Stats stats(samples.get(), FLAGS_samples);
            log.config(config);
            log.timer("min_ms",    stats.min);
            log.timer("median_ms", stats.median);
            log.timer("mean_ms",   stats.mean);
            log.timer("max_ms",    stats.max);
            log.timer("stddev_ms", sqrt(stats.var));

            if (FLAGS_runOnce) {
                if (targets.count() == 1) {
                    config = ""; // Only print the config if we run the same bench on more than one.
                }
                SkDebugf("%s\t%s\n", bench->getName(), config);
            } else if (FLAGS_verbose) {
                for (int i = 0; i < FLAGS_samples; i++) {
                    SkDebugf("%s ", HUMANIZE(samples[i]));
                }
                SkDebugf("%s\n", bench->getName());
            } else if (FLAGS_quiet) {
                if (targets.count() == 1) {
                    config = ""; // Only print the config if we run the same bench on more than one.
                }
                SkDebugf("%s\t%s\t%s\n", HUMANIZE(stats.median), bench->getName(), config);
            } else {
                const double stddev_percent = 100 * sqrt(stats.var) / stats.mean;
                SkDebugf("%d\t%s\t%s\t%s\t%s\t%.0f%%\t%s\t%s\t%s\n"
                        , loops
                        , HUMANIZE(stats.min)
                        , HUMANIZE(stats.median)
                        , HUMANIZE(stats.mean)
                        , HUMANIZE(stats.max)
                        , stddev_percent
                        , stats.plot.c_str()
                        , config
                        , bench->getName()
                        );
            }
        }
        targets.deleteAll();

    #if SK_SUPPORT_GPU
        if (FLAGS_resetGpuContext) {
            gGrFactory.destroyContexts();
        }
    #endif
    }

    return 0;
}

#if !defined SK_BUILD_FOR_IOS
int main(int argc, char * const argv[]) {
    return tool_main(argc, (char**) argv);
}
#endif