Split the benchmarks into individual jobspecs
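
With this change, benchmark binaries are expanded the same way gtest binaries already were: the binary's list flag is invoked once, and each listed test becomes its own jobspec with the matching filter flag, so benchmarks are scheduled, timed, and retried individually. As a rough standalone sketch of that expansion (the function name expand_tests and the extra_args parameter are illustrative placeholders, not names from this file; the parsing mirrors the loop in the hunk below):

    import subprocess

    def expand_tests(binary, prefix, extra_args):
        # prefix is 'gtest' or 'benchmark'; both frameworks accept
        # --<prefix>_list_tests and --<prefix>_filter flags.
        listing = subprocess.check_output([binary, '--%s_list_tests' % prefix])
        cmdlines = []
        base = None
        for line in listing.split('\n'):
            line = line.rstrip()
            if not line:
                continue
            if line[0] != ' ':
                # an unindented line names a test suite (e.g. "EndToEndTest.")
                base = line
            else:
                # indented lines are individual tests within that suite
                cmdlines.append(
                    [binary, '--%s_filter=%s' % (prefix, base + line.strip())]
                    + extra_args)
        return cmdlines

Each command line produced this way is then wrapped in its own job_spec, as the diff does inline.
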
diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py
index b38108d..784d68f 100755
--- a/tools/run_tests/run_tests.py
+++ b/tools/run_tests/run_tests.py
@@ -323,13 +323,19 @@
         if cpu_cost == 'capacity':
           cpu_cost = multiprocessing.cpu_count()
         if os.path.isfile(binary):
+          test_prefix = None
           if 'gtest' in target and target['gtest']:
-            # here we parse the output of --gtest_list_tests to build up a
-            # complete list of the tests contained in a binary
-            # for each test, we then add a job to run, filtering for just that
-            # test
+            test_prefix = 'gtest'
+          elif 'benchmark' in target and target['benchmark']:
+            test_prefix = 'benchmark'
+
+          if test_prefix:
+            # here we parse the output of --gtest_list_tests (or
+            # --benchmark_list_tests) to build up a complete list of
+            # the tests contained in a binary; for each test, we then
+            # add a job to run, filtering for just that test.
             with open(os.devnull, 'w') as fnull:
-              tests = subprocess.check_output([binary, '--gtest_list_tests'],
+              tests = subprocess.check_output([binary, '--%s_list_tests' % test_prefix],
                                               stderr=fnull)
             base = None
             for line in tests.split('\n'):
@@ -342,7 +348,7 @@
                 assert base is not None
                 assert line[1] == ' '
                 test = base + line.strip()
-                cmdline = [binary, '--gtest_filter=%s' % test] + target['args']
+                cmdline = [binary, '--%s_filter=%s' % (test_prefix, test)] + target['args']
                 out.append(self.config.job_spec(cmdline,
                                                 shortname='%s %s' % (' '.join(cmdline), shortname_ext),
                                                 cpu_cost=cpu_cost,