Run PHP unit tests with run_tests.py
diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py
index 978a15b..d291abf 100755
--- a/tools/run_tests/run_tests.py
+++ b/tools/run_tests/run_tests.py
@@ -20,6 +20,7 @@
   def __init__(self, config):
     self.build_config = config
     self.maxjobs = 32 * multiprocessing.cpu_count()
+    self.allow_hashing = (config != 'gcov')
 
   def run_command(self, binary):
     return [binary]
@@ -32,11 +33,43 @@
     self.build_config = config
     self.tool = tool
     self.maxjobs = 4 * multiprocessing.cpu_count()
+    self.allow_hashing = False
 
   def run_command(self, binary):
     return ['valgrind', binary, '--tool=%s' % self.tool]
 
 
+class CLanguage(object):
+
+  def __init__(self, make_target):
+    self.allow_hashing = True
+    self.make_target = make_target
+
+  def test_binaries(self, config):
+    return glob.glob('bins/%s/*_test' % config)
+
+  def make_targets(self):
+    return ['buildtests_%s' % self.make_target]
+
+  def build_steps(self):
+    return []
+
+
+class PhpLanguage(object):
+
+  def __init__(self):
+    self.allow_hashing = False
+
+  def test_binaries(self, config):
+    return ['src/php/bin/run_tests.sh']
+
+  def make_targets(self):
+    return []
+
+  def build_steps(self):
+    return [['tools/run_tests/build_php.sh']]
+
+
 # different configurations we can run under
 _CONFIGS = {
     'dbg': SimpleConfig('dbg'),
@@ -51,10 +84,10 @@
 
 
 _DEFAULT = ['dbg', 'opt']
-_LANGUAGE_BUILD_RULE = {
-    'c++': ['make', 'buildtests_cxx'],
-    'c': ['make', 'buildtests_c'],
-    'php': ['tools/run_tests/build_php.sh']
+_LANGUAGES = {
+    'c++': CLanguage('cxx'),
+    'c': CLanguage('c'),
+    'php': PhpLanguage()
 }
 
 # parse command line
@@ -63,7 +96,6 @@
                   choices=['all'] + sorted(_CONFIGS.keys()),
                   nargs='+',
                   default=_DEFAULT)
-argp.add_argument('-t', '--test-filter', nargs='*', default=['*'])
 argp.add_argument('-n', '--runs_per_test', default=1, type=int)
 argp.add_argument('-f', '--forever',
                   default=False,
@@ -74,9 +106,9 @@
                   action='store_const',
                   const=True)
 argp.add_argument('-l', '--language',
-                  choices=sorted(_LANGUAGE_BUILD_RULE.keys()),
+                  choices=sorted(_LANGUAGES.keys()),
                   nargs='+',
-                  default=sorted(_LANGUAGE_BUILD_RULE.keys()))
+                  default=sorted(_LANGUAGES.keys()))
 args = argp.parse_args()
 
 # grab config
@@ -86,21 +118,17 @@
                       for x in args.config))
 build_configs = set(cfg.build_config for cfg in run_configs)
 
-make_targets = set()
-build_steps = []
-for language in args.language:
-  cmd = _LANGUAGE_BUILD_RULE[language]
-  if cmd[0] == 'make':
-    make_targets.update(cmd[1:])
-  else:
-    build_steps.append(cmd)
-if make_targets:
-  build_steps = [['make',
-                  '-j', '%d' % (multiprocessing.cpu_count() + 1),
-                  'CONFIG=%s' % cfg] + list(make_targets)
-                 for cfg in build_configs] + build_steps
+make_targets = []
+languages = set(_LANGUAGES[l] for l in args.language)
+build_steps = [['make',
+                '-j', '%d' % (multiprocessing.cpu_count() + 1),
+                'CONFIG=%s' % cfg] + list(set(
+                    itertools.chain.from_iterable(l.make_targets()
+                                                  for l in languages)))
+               for cfg in build_configs] + list(
+                   itertools.chain.from_iterable(l.build_steps()
+                                                 for l in languages))
 
-filters = args.test_filter
 runs_per_test = args.runs_per_test
 forever = args.forever
 
@@ -146,28 +174,26 @@
     return 1
 
   # run all the tests
-  if not jobset.run(
-      itertools.ifilter(
-          lambda x: x is not None, (
-              config.run_command(x)
-              for config in run_configs
-              for filt in filters
-              for x in itertools.chain.from_iterable(itertools.repeat(
-                  glob.glob('bins/%s/%s_test' % (
-                      config.build_config, filt)),
-                  runs_per_test)))),
-      check_cancelled,
-      newline_on_success=newline_on_success,
-      maxjobs=min(c.maxjobs for c in run_configs),
-      cache=cache):
+  one_run = dict(
+      (' '.join(config.run_command(x)), config.run_command(x))
+      for config in run_configs
+      for language in args.language
+      for x in _LANGUAGES[language].test_binaries(config.build_config)
+      ).values()
+  all_runs = itertools.chain.from_iterable(
+      itertools.repeat(one_run, runs_per_test))
+  if not jobset.run(all_runs, check_cancelled,
+                    newline_on_success=newline_on_success,
+                    maxjobs=min(c.maxjobs for c in run_configs),
+                    cache=cache):
     return 2
 
   return 0
 
 
-test_cache = (None if runs_per_test != 1
-              or 'gcov' in build_configs
-              or 'valgrind' in build_configs
+test_cache = (None
+              if not all(x.allow_hashing
+                         for x in itertools.chain(languages, run_configs))
               else TestCache())
 if test_cache:
   test_cache.maybe_load()
@@ -187,6 +213,7 @@
                      'All tests are now passing properly',
                      do_newline=True)
     jobset.message('IDLE', 'No change detected')
+    if test_cache: test_cache.save()
     while not have_files_changed():
       time.sleep(1)
 else:
@@ -197,5 +224,5 @@
     jobset.message('SUCCESS', 'All tests passed', do_newline=True)
   else:
     jobset.message('FAILED', 'Some tests failed', do_newline=True)
-  test_cache.save()
+  if test_cache: test_cache.save()
   sys.exit(result)