Add tools/nanobench_flags.py.
This should look suspiciously similar to tools/dm_flags.py. In fact, I
tweaked tools/dm_flags.py a bit to make it even more suspiciously similar.
I'll leave actually deduping this to future me.
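One possible shape for that future dedup, sketched here as a hypothetical
shared helper (tools/flags_common.py and its main() are my invention, not
part of this change):

  # tools/flags_common.py (hypothetical sketch)
  import json
  import sys

  def main(get_args, usage):
    # Shared command-line driver for dm_flags.py and nanobench_flags.py.
    # Each script would keep its own get_args(bot) and usage string.
    if len(sys.argv) != 3:
      print usage
      sys.exit(1)
    with open(sys.argv[1], 'w') as out:
      json.dump(get_args(sys.argv[2]), out)

Each *_flags.py would then end with something like
flags_common.main(get_args, usage) instead of repeating the argv handling.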
I noticed an opportunity to make our Valgrind run of nanobench faster, by
not only skipping auto-calibration (--loops 1) but also taking only one
measurement (--samples 1). This should be 5-10x faster than the default.
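Concretely, for the Valgrind bot get_args() then yields exactly the flags
recorded in the golden file below:

  >>> get_args('Test-Ubuntu12-ShuttleA-GTX550Ti-x86_64-Release-Valgrind')
  ['--scales', '1.0', '1.1', '--loops', '1', '--samples', '1']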
BUG=skia:
Review URL: https://codereview.chromium.org/957503002
diff --git a/tools/dm_flags.py b/tools/dm_flags.py
index 277bf8a..985a62a 100755
--- a/tools/dm_flags.py
+++ b/tools/dm_flags.py
@@ -19,7 +19,7 @@
cov_start = lineno()+1 # We care about coverage starting just past this def.
-def get_dm_args(bot):
+def get_args(bot):
args = []
configs = ['565', '8888', 'gpu', 'nvprmsaa4']
@@ -94,7 +94,7 @@
cov = coverage.coverage()
cov.start()
for case in cases:
- args[case] = get_dm_args(case)
+ args[case] = get_args(case)
cov.stop()
this_file = os.path.basename(__file__)
@@ -119,4 +119,4 @@
sys.exit(1)
with open(sys.argv[1], 'w') as out:
- json.dump(get_dm_args(sys.argv[2]), out)
+ json.dump(get_args(sys.argv[2]), out)
diff --git a/tools/nanobench_flags.json b/tools/nanobench_flags.json
new file mode 100644
index 0000000..2a8f9c8
--- /dev/null
+++ b/tools/nanobench_flags.json
@@ -0,0 +1,36 @@
+{
+ "Perf-Android-GalaxyS3-Mali400-Arm7-Release": [
+ "--scales",
+ "1.0",
+ "1.1",
+ "--match",
+ "~blurroundrect",
+ "~patch_grid",
+ "~desk_carsvg",
+ "--nocpu"
+ ],
+ "Perf-Android-Nexus7-Tegra3-Arm7-Release": [
+ "--scales",
+ "1.0",
+ "1.1",
+ "--match",
+ "skp"
+ ],
+ "Test-Ubuntu12-ShuttleA-GTX550Ti-x86_64-Release-Valgrind": [
+ "--scales",
+ "1.0",
+ "1.1",
+ "--loops",
+ "1",
+ "--samples",
+ "1"
+ ],
+ "Test-Win7-ShuttleA-HD2000-x86-Debug-ANGLE": [
+ "--scales",
+ "1.0",
+ "1.1",
+ "--match",
+ "~gradient",
+ "~etc1bitmap"
+ ]
+}
\ No newline at end of file
diff --git a/tools/nanobench_flags.py b/tools/nanobench_flags.py
new file mode 100755
index 0000000..18668c9
--- /dev/null
+++ b/tools/nanobench_flags.py
@@ -0,0 +1,91 @@
+#!/usr/bin/env python
+
+usage = '''
+Write extra flags to outfile for nanobench based on the bot name:
+ $ python nanobench_flags.py outfile Perf-Android-GalaxyS3-Mali400-Arm7-Release
+Or run self-tests:
+ $ python nanobench_flags.py test
+'''
+
+import inspect
+import json
+import os
+import sys
+
+
+def lineno():
+ caller = inspect.stack()[1] # Up one level to our caller.
+ return inspect.getframeinfo(caller[0]).lineno
+
+
+cov_start = lineno()+1 # We care about coverage starting just past this def.
+def get_args(bot):
+ args = []
+
+ args.extend(['--scales', '1.0', '1.1'])
+
+ if 'Valgrind' in bot:
+ # Don't care about Valgrind performance.
+ args.extend(['--loops', '1'])
+ args.extend(['--samples', '1'])
+
+ match = []
+ if 'Android' in bot:
+ # Segfaults when run as GPU bench. Very large texture?
+ match.append('~blurroundrect')
+ match.append('~patch_grid') # skia:2847
+ match.append('~desk_carsvg')
+ if 'HD2000' in bot:
+ match.extend(['~gradient', '~etc1bitmap']) # skia:2895
+ if 'Nexus7' in bot:
+ match = ['skp'] # skia:2774
+ if match:
+ args.append('--match')
+ args.extend(match)
+
+ if ('GalaxyS3' in bot or
+ 'GalaxyS4' in bot):
+ args.append('--nocpu')
+ return args
+cov_end = lineno() # Don't care about code coverage past here.
+
+
+def self_test():
+ import coverage # This way the bots don't need coverage.py to be installed.
+ args = {}
+ cases = [
+ 'Perf-Android-GalaxyS3-Mali400-Arm7-Release',
+ 'Perf-Android-Nexus7-Tegra3-Arm7-Release',
+ 'Test-Ubuntu12-ShuttleA-GTX550Ti-x86_64-Release-Valgrind',
+ 'Test-Win7-ShuttleA-HD2000-x86-Debug-ANGLE',
+ ]
+
+ cov = coverage.coverage()
+ cov.start()
+ for case in cases:
+ args[case] = get_args(case)
+ cov.stop()
+
+ this_file = os.path.basename(__file__)
+ _, _, not_run, _ = cov.analysis(this_file)
+ filtered = [line for line in not_run if line > cov_start and line < cov_end]
+ if filtered:
+ print 'Lines not covered by test cases: ', filtered
+ sys.exit(1)
+
+ golden = this_file.replace('.py', '.json')
+ with open(os.path.join(os.path.dirname(__file__), golden), 'w') as f:
+ json.dump(args, f, indent=2, sort_keys=True)
+
+
+if __name__ == '__main__':
+ if len(sys.argv) == 2 and sys.argv[1] == 'test':
+ self_test()
+ sys.exit(0)
+
+ if len(sys.argv) != 3:
+ print usage
+ sys.exit(1)
+
+ with open(sys.argv[1], 'w') as out:
+ json.dump(get_args(sys.argv[2]), out)