Allow specifying a maximum run time to run_tests
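
Adds a --max_time option (in seconds; the default of -1 means no limit)
to run_tests.py and plumbs it through to jobset.Jobset. Once the overall
run has exceeded the budget, jobs that have not started yet are reported
as SKIPPED instead of being run. When a budget is set, the test order is
shuffled even on travis, so repeated bounded runs cover different tests.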
diff --git a/tools/run_tests/python_utils/jobset.py b/tools/run_tests/python_utils/jobset.py
index 5d812f2..460f359 100755
--- a/tools/run_tests/python_utils/jobset.py
+++ b/tools/run_tests/python_utils/jobset.py
@@ -348,7 +348,7 @@
   """Manages one run of jobs."""
 
   def __init__(self, check_cancelled, maxjobs, newline_on_success, travis,
-               stop_on_failure, add_env, quiet_success):
+               stop_on_failure, add_env, quiet_success, max_time):
     self._running = set()
     self._check_cancelled = check_cancelled
     self._cancelled = False
@@ -360,6 +360,7 @@
     self._stop_on_failure = stop_on_failure
     self._add_env = add_env
     self._quiet_success = quiet_success
+    self._max_time = max_time
     self.resultset = {}
     self._remaining = None
     self._start_time = time.time()
@@ -379,6 +380,15 @@
   def start(self, spec):
     """Start a job. Return True on success, False on failure."""
     while True:
+      # Enforce the overall wall-clock budget: once max_time seconds have
+      # elapsed, jobs that have not started yet are recorded as SKIPPED
+      # instead of being run; max_time <= 0 disables the budget.
+      if self._max_time > 0 and time.time() - self._start_time > self._max_time:
+        skipped_job_result = JobResult()
+        skipped_job_result.state = 'SKIPPED'
+        message('SKIPPED', spec.shortname, do_newline=True)
+        self.resultset[spec.shortname] = [skipped_job_result]
+        return True
       if self.cancelled(): return False
       current_cpu_cost = self.cpu_cost()
       if current_cpu_cost == 0: break
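
The core of the change is a wall-clock budget checked before each job is started. A minimal runnable sketch of the same pattern outside of Jobset (class and job names are hypothetical):

```python
import time

class BudgetedStarter:
    """Refuses to start new work once an overall wall-clock budget is spent."""

    def __init__(self, max_time=-1):
        self._max_time = max_time      # seconds; <= 0 disables the budget
        self._start_time = time.time()

    def start(self, name, work):
        # Same check as Jobset.start above: past the deadline, record the
        # job as skipped and return True so the overall run keeps going.
        if self._max_time > 0 and time.time() - self._start_time > self._max_time:
            print('SKIPPED %s' % name)
            return True
        work()
        return True

starter = BudgetedStarter(max_time=1)
starter.start('fast_test', lambda: time.sleep(0.6))   # runs: budget not yet spent
starter.start('slow_test', lambda: time.sleep(0.6))   # runs: started at ~0.6s
starter.start('late_test', lambda: None)              # prints 'SKIPPED late_test'
```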
@@ -474,7 +484,8 @@
         stop_on_failure=False,
         add_env={},
         skip_jobs=False,
-        quiet_success=False):
+        quiet_success=False,
+        max_time=-1):
   if skip_jobs:
     resultset = {}
     skipped_job_result = JobResult()
@@ -486,7 +497,7 @@
   js = Jobset(check_cancelled,
               maxjobs if maxjobs is not None else _DEFAULT_MAX_JOBS,
               newline_on_success, travis, stop_on_failure, add_env,
-              quiet_success)
+              quiet_success, max_time)
   for cmdline, remaining in tag_remaining(cmdlines):
     if not js.start(cmdline):
       break
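
For callers of jobset.run the new argument is optional and backwards compatible. A hedged sketch of a call site, assuming the import style run_tests.py uses and the JobSpec/run signatures in jobset.py (values illustrative):

```python
import python_utils.jobset as jobset  # import style used by run_tests.py

# Ten one-second sleeps, at most two at a time, under a two-second budget:
# the first jobs run, and whatever is still pending once time is up is
# reported as SKIPPED rather than started.
specs = [jobset.JobSpec(['sleep', '1'], shortname='sleep_%d' % i)
         for i in range(10)]
num_failures, resultset = jobset.run(specs, maxjobs=2, max_time=2)
```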
diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py
index 9130bc9..a1ec1b2 100755
--- a/tools/run_tests/run_tests.py
+++ b/tools/run_tests/run_tests.py
@@ -1210,6 +1210,7 @@
                        'Useful when running many iterations of each test (argument -n).')
 argp.add_argument('--force_default_poller', default=False, action='store_const', const=True,
                   help='Don\'t try to iterate over many polling strategies when they exist')
+argp.add_argument('--max_time', default=-1, type=int, help='Maximum overall run time in seconds (non-positive = no limit); jobs not started before the deadline are reported as SKIPPED')
 args = argp.parse_args()
 
 if args.force_default_poller:
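
The flag itself is a plain integer option; a quick standalone check of its parsing behavior (sketch mirroring the add_argument call above):

```python
import argparse

argp = argparse.ArgumentParser()
argp.add_argument('--max_time', default=-1, type=int,
                  help='Maximum overall run time in seconds (non-positive = no limit)')

assert argp.parse_args([]).max_time == -1                     # default: no budget
assert argp.parse_args(['--max_time', '600']).max_time == 600
```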
@@ -1465,7 +1466,7 @@
            not re.search(args.regex_exclude, spec.shortname))))
     # When running on travis, we want our test runs to be as similar as possible
     # for reproducibility purposes.
-    if args.travis:
+    if args.travis and args.max_time <= 0:
       massaged_one_run = sorted(one_run, key=lambda x: x.shortname)
     else:
       # whereas otherwise, we want to shuffle things up to give all tests a
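
This condition change matters once a budget is in play: a deterministic, sorted order would always spend the budget on the same prefix of the test list, so with max_time set the runner falls through to the shuffled branch even on travis, letting successive bounded runs cover different tests.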
@@ -1493,7 +1494,7 @@
         all_runs, check_cancelled, newline_on_success=newline_on_success,
         travis=args.travis, maxjobs=args.jobs,
         stop_on_failure=args.stop_on_failure,
-        quiet_success=args.quiet_success)
+        quiet_success=args.quiet_success, max_time=args.max_time)
     if resultset:
       for k, v in sorted(resultset.items()):
         num_runs, num_failures = _calculate_num_runs_failures(v)
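
End to end, the feature is exercised from the command line, e.g. `tools/run_tests/run_tests.py --max_time 900` (remaining flags as usual for your configuration) caps the run at 15 minutes; jobs still pending at that point are reported as SKIPPED in the result summary instead of being started.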