Upgrade V8 to version 4.9.385.28

https://chromium.googlesource.com/v8/v8/+/4.9.385.28

FPIIM-449

Change-Id: I4b2e74289d4bf3667f2f3dc8aa2e541f63e26eb4
diff --git a/tools/testrunner/local/commands.py b/tools/testrunner/local/commands.py
index d6445d0..a4df32c 100644
--- a/tools/testrunner/local/commands.py
+++ b/tools/testrunner/local/commands.py
@@ -26,28 +26,14 @@
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 
-import os
-import signal
 import subprocess
 import sys
-import tempfile
-import time
+from threading import Timer
 
 from ..local import utils
 from ..objects import output
 
 
-def KillProcessWithID(pid):
-  if utils.IsWindows():
-    os.popen('taskkill /T /F /PID %d' % pid)
-  else:
-    os.kill(pid, signal.SIGTERM)
-
-
-MAX_SLEEP_TIME = 0.1
-INITIAL_SLEEP_TIME = 0.0001
-SLEEP_TIME_FACTOR = 1.25
-
 SEM_INVALID_VALUE = -1
 SEM_NOGPFAULTERRORBOX = 0x0002  # Microsoft Platform SDK WinBase.h
 
@@ -75,77 +61,60 @@
     error_mode = SEM_NOGPFAULTERRORBOX
     prev_error_mode = Win32SetErrorMode(error_mode)
     Win32SetErrorMode(error_mode | prev_error_mode)
-  process = subprocess.Popen(
-    shell=utils.IsWindows(),
-    args=popen_args,
-    **rest
-  )
+
+  try:
+    process = subprocess.Popen(
+      args=popen_args,
+      stdout=subprocess.PIPE,
+      stderr=subprocess.PIPE,
+      **rest
+    )
+  except Exception as e:
+    sys.stderr.write("Error executing: %s\n" % popen_args)
+    raise e
+
   if (utils.IsWindows() and prev_error_mode != SEM_INVALID_VALUE):
     Win32SetErrorMode(prev_error_mode)
-  # Compute the end time - if the process crosses this limit we
-  # consider it timed out.
-  if timeout is None: end_time = None
-  else: end_time = time.time() + timeout
-  timed_out = False
-  # Repeatedly check the exit code from the process in a
-  # loop and keep track of whether or not it times out.
-  exit_code = None
-  sleep_time = INITIAL_SLEEP_TIME
-  while exit_code is None:
-    if (not end_time is None) and (time.time() >= end_time):
-      # Kill the process and wait for it to exit.
-      KillProcessWithID(process.pid)
-      exit_code = process.wait()
-      timed_out = True
-    else:
-      exit_code = process.poll()
-      time.sleep(sleep_time)
-      sleep_time = sleep_time * SLEEP_TIME_FACTOR
-      if sleep_time > MAX_SLEEP_TIME:
-        sleep_time = MAX_SLEEP_TIME
-  return (exit_code, timed_out)
 
-
-def PrintError(string):
-  sys.stderr.write(string)
-  sys.stderr.write("\n")
-
-
-def CheckedUnlink(name):
-  # On Windows, when run with -jN in parallel processes,
-  # OS often fails to unlink the temp file. Not sure why.
-  # Need to retry.
-  # Idea from https://bugs.webkit.org/attachment.cgi?id=75982&action=prettypatch
-  retry_count = 0
-  while retry_count < 30:
+  def kill_process(process, timeout_result):
+    timeout_result[0] = True
     try:
-      os.unlink(name)
-      return
-    except OSError, e:
-      retry_count += 1
-      time.sleep(retry_count * 0.1)
-  PrintError("os.unlink() " + str(e))
+      if utils.IsWindows():
+        if verbose:
+          print "Attempting to kill process %d" % process.pid
+          sys.stdout.flush()
+        tk = subprocess.Popen(
+            'taskkill /T /F /PID %d' % process.pid,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+        )
+        stdout, stderr = tk.communicate()
+        if verbose:
+          print "Taskkill results for %d" % process.pid
+          print stdout
+          print stderr
+          print "Return code: %d" % tk.returncode
+          sys.stdout.flush()
+      else:
+        process.kill()
+    except OSError:
+      sys.stderr.write('Error: Process %s already ended.\n' % process.pid)
+
+  # Pseudo object to communicate with timer thread.
+  timeout_result = [False]
+
+  timer = Timer(timeout, kill_process, [process, timeout_result])
+  timer.start()
+  stdout, stderr = process.communicate()
+  timer.cancel()
+  return process.returncode, timeout_result[0], stdout, stderr
 
 
 def Execute(args, verbose=False, timeout=None):
-  try:
-    args = [ c for c in args if c != "" ]
-    (fd_out, outname) = tempfile.mkstemp()
-    (fd_err, errname) = tempfile.mkstemp()
-    (exit_code, timed_out) = RunProcess(
-      verbose,
-      timeout,
-      args=args,
-      stdout=fd_out,
-      stderr=fd_err
-    )
-  finally:
-    # TODO(machenbach): A keyboard interrupt before the assignment to
-    # fd_out|err can lead to reference errors here.
-    os.close(fd_out)
-    os.close(fd_err)
-    out = file(outname).read()
-    errors = file(errname).read()
-    CheckedUnlink(outname)
-    CheckedUnlink(errname)
-  return output.Output(exit_code, timed_out, out, errors)
+  args = [ c for c in args if c != "" ]
+  exit_code, timed_out, stdout, stderr = RunProcess(
+    verbose,
+    timeout,
+    args=args,
+  )
+  return output.Output(exit_code, timed_out, stdout, stderr)
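
The rewrite above replaces the old poll-and-sleep loop with a threading.Timer that kills the child when the timeout fires, while communicate() blocks for output and exit. A minimal, self-contained sketch of that pattern (simplified: the Windows taskkill branch and verbose logging are omitted, and run_with_timeout is an illustrative name):

    import subprocess
    import sys
    from threading import Timer

    def run_with_timeout(args, timeout):
        process = subprocess.Popen(
            args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        timed_out = [False]  # mutable cell shared with the timer thread

        def kill_process():
            timed_out[0] = True
            try:
                process.kill()
            except OSError:
                pass  # process already ended on its own

        timer = Timer(timeout, kill_process)
        timer.start()
        stdout, stderr = process.communicate()  # blocks until exit or kill
        timer.cancel()
        return process.returncode, timed_out[0], stdout, stderr

    if __name__ == "__main__":
        print(run_with_timeout([sys.executable, "-c", "print(42)"], 5))
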
diff --git a/tools/testrunner/local/execution.py b/tools/testrunner/local/execution.py
index 5c5fbac..c9fe541 100644
--- a/tools/testrunner/local/execution.py
+++ b/tools/testrunner/local/execution.py
@@ -26,18 +26,27 @@
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 
+import collections
 import os
 import shutil
+import sys
 import time
 
 from pool import Pool
 from . import commands
 from . import perfdata
 from . import statusfile
+from . import testsuite
 from . import utils
 
 
-class Job(object):
+# Base dir of the v8 checkout.
+BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(
+    os.path.abspath(__file__)))))
+TEST_DIR = os.path.join(BASE_DIR, "test")
+
+
+class Instructions(object):
   def __init__(self, command, dep_command, test_id, timeout, verbose):
     self.command = command
     self.dep_command = dep_command
@@ -46,24 +55,119 @@
     self.verbose = verbose
 
 
-def RunTest(job):
-  start_time = time.time()
-  if job.dep_command is not None:
-    dep_output = commands.Execute(job.dep_command, job.verbose, job.timeout)
-    # TODO(jkummerow): We approximate the test suite specific function
-    # IsFailureOutput() by just checking the exit code here. Currently
-    # only cctests define dependencies, for which this simplification is
-    # correct.
-    if dep_output.exit_code != 0:
-      return (job.id, dep_output, time.time() - start_time)
-  output = commands.Execute(job.command, job.verbose, job.timeout)
-  return (job.id, output, time.time() - start_time)
+# Structure that keeps global information per worker process.
+ProcessContext = collections.namedtuple(
+    "process_context", ["suites", "context"])
+
+
+def MakeProcessContext(context):
+  """Generate a process-local context.
+
+  This reloads all suites per process and stores the global context.
+
+  Args:
+    context: The global context from the test runner.
+  """
+  suite_paths = utils.GetSuitePaths(TEST_DIR)
+  suites = {}
+  for root in suite_paths:
+    # Don't reinitialize global state as this is concurrently called from
+    # different processes.
+    suite = testsuite.TestSuite.LoadTestSuite(
+        os.path.join(TEST_DIR, root), global_init=False)
+    if suite:
+      suites[suite.name] = suite
+  return ProcessContext(suites, context)
+
+
+def GetCommand(test, context):
+  d8testflag = []
+  shell = test.suite.shell()
+  if shell == "d8":
+    d8testflag = ["--test"]
+  if utils.IsWindows():
+    shell += ".exe"
+  if context.random_seed:
+    d8testflag += ["--random-seed=%s" % context.random_seed]
+  cmd = (context.command_prefix +
+         [os.path.abspath(os.path.join(context.shell_dir, shell))] +
+         d8testflag +
+         test.suite.GetFlagsForTestCase(test, context) +
+         context.extra_flags)
+  return cmd
+
+
+def _GetInstructions(test, context):
+  command = GetCommand(test, context)
+  timeout = context.timeout
+  if ("--stress-opt" in test.flags or
+      "--stress-opt" in context.mode_flags or
+      "--stress-opt" in context.extra_flags):
+    timeout *= 4
+  if "--noenable-vfp3" in context.extra_flags:
+    timeout *= 2
+  # FIXME(machenbach): Make this more OO. Don't expose default outcomes or
+  # the like.
+  if statusfile.IsSlow(test.outcomes or [statusfile.PASS]):
+    timeout *= 2
+  if test.dependency is not None:
+    dep_command = [ c.replace(test.path, test.dependency) for c in command ]
+  else:
+    dep_command = None
+  return Instructions(
+      command, dep_command, test.id, timeout, context.verbose)
+
+
+class Job(object):
+  """Stores data to be sent over the multi-process boundary.
+
+  All contained fields will be pickled/unpickled.
+  """
+
+  def Run(self, process_context):
+    """Executes the job.
+
+    Args:
+      process_context: Process-local information that is initialized by the
+                       executing worker.
+    """
+    raise NotImplementedError()
+
+
+class TestJob(Job):
+  def __init__(self, test):
+    self.test = test
+
+  def Run(self, process_context):
+    # Retrieve a new suite object on the worker-process side. The original
+    # suite object isn't pickled.
+    self.test.SetSuiteObject(process_context.suites)
+    instr = _GetInstructions(self.test, process_context.context)
+
+    start_time = time.time()
+    if instr.dep_command is not None:
+      dep_output = commands.Execute(
+          instr.dep_command, instr.verbose, instr.timeout)
+      # TODO(jkummerow): We approximate the test suite specific function
+      # IsFailureOutput() by just checking the exit code here. Currently
+      # only cctests define dependencies, for which this simplification is
+      # correct.
+      if dep_output.exit_code != 0:
+        return (instr.id, dep_output, time.time() - start_time)
+    output = commands.Execute(instr.command, instr.verbose, instr.timeout)
+    return (instr.id, output, time.time() - start_time)
+
+
+def RunTest(job, process_context):
+  return job.Run(process_context)
+
 
 class Runner(object):
 
   def __init__(self, suites, progress_indicator, context):
     self.datapath = os.path.join("out", "testrunner_data")
-    self.perf_data_manager = perfdata.PerfDataManager(self.datapath)
+    self.perf_data_manager = perfdata.GetPerfDataManager(
+        context, self.datapath)
     self.perfdata = self.perf_data_manager.GetStore(context.arch, context.mode)
     self.perf_failures = False
     self.printed_allocations = False
@@ -71,16 +175,22 @@
     if not context.no_sorting:
       for t in self.tests:
         t.duration = self.perfdata.FetchPerfData(t) or 1.0
+      slow_key = lambda t: statusfile.IsSlow(t.outcomes)
+      self.tests.sort(key=slow_key, reverse=True)
       self.tests.sort(key=lambda t: t.duration, reverse=True)
-    self._CommonInit(len(self.tests), progress_indicator, context)
+    self._CommonInit(suites, progress_indicator, context)
 
-  def _CommonInit(self, num_tests, progress_indicator, context):
+  def _CommonInit(self, suites, progress_indicator, context):
+    self.total = 0
+    for s in suites:
+      for t in s.tests:
+        t.id = self.total
+        self.total += 1
     self.indicator = progress_indicator
-    progress_indicator.runner = self
+    progress_indicator.SetRunner(self)
     self.context = context
     self.succeeded = 0
-    self.total = num_tests
-    self.remaining = num_tests
+    self.remaining = self.total
     self.failed = []
     self.crashed = 0
     self.reran_tests = 0
@@ -92,23 +202,6 @@
       print("PerfData exception: %s" % e)
       self.perf_failures = True
 
-  def _GetJob(self, test):
-    command = self.GetCommand(test)
-    timeout = self.context.timeout
-    if ("--stress-opt" in test.flags or
-        "--stress-opt" in self.context.mode_flags or
-        "--stress-opt" in self.context.extra_flags):
-      timeout *= 4
-    # FIXME(machenbach): Make this more OO. Don't expose default outcomes or
-    # the like.
-    if statusfile.IsSlow(test.outcomes or [statusfile.PASS]):
-      timeout *= 2
-    if test.dependency is not None:
-      dep_command = [ c.replace(test.path, test.dependency) for c in command ]
-    else:
-      dep_command = None
-    return Job(command, dep_command, test.id, timeout, self.context.verbose)
-
   def _MaybeRerun(self, pool, test):
     if test.run <= self.context.rerun_failures_count:
       # Possibly rerun this test if its run count is below the maximum per
@@ -129,8 +222,9 @@
       test.duration = None
       test.output = None
       test.run += 1
-      pool.add([self._GetJob(test)])
+      pool.add([TestJob(test)])
       self.remaining += 1
+      self.total += 1
 
   def _ProcessTestNormal(self, test, result, pool):
     self.indicator.AboutToRun(test)
@@ -150,6 +244,7 @@
     self.indicator.HasRun(test, has_unexpected_output or test.run > 1)
     if has_unexpected_output:
       # Rerun test failures after the indicator has processed the results.
+      self._VerbosePrint("Attempting to rerun test after failure.")
       self._MaybeRerun(pool, test)
     # Update the perf database if the test succeeded.
     return not has_unexpected_output
@@ -197,7 +292,7 @@
       # remember the output for comparison.
       test.run += 1
       test.output = result[1]
-      pool.add([self._GetJob(test)])
+      pool.add([TestJob(test)])
     # Always update the perf database.
     return True
 
@@ -205,66 +300,70 @@
     self.indicator.Starting()
     self._RunInternal(jobs)
     self.indicator.Done()
-    if self.failed or self.remaining:
+    if self.failed:
       return 1
+    elif self.remaining:
+      return 2
     return 0
 
   def _RunInternal(self, jobs):
     pool = Pool(jobs)
     test_map = {}
-    # TODO(machenbach): Instead of filling the queue completely before
-    # pool.imap_unordered, make this a generator that already starts testing
-    # while the queue is filled.
-    queue = []
-    queued_exception = None
-    for test in self.tests:
-      assert test.id >= 0
-      test_map[test.id] = test
-      try:
-        queue.append([self._GetJob(test)])
-      except Exception, e:
-        # If this failed, save the exception and re-raise it later (after
-        # all other tests have had a chance to run).
-        queued_exception = e
-        continue
+    queued_exception = [None]
+    def gen_tests():
+      for test in self.tests:
+        assert test.id >= 0
+        test_map[test.id] = test
+        try:
+          yield [TestJob(test)]
+        except Exception, e:
+          # If this failed, save the exception and re-raise it later (after
+          # all other tests have had a chance to run).
+          queued_exception[0] = e
+          continue
     try:
-      it = pool.imap_unordered(RunTest, queue)
+      it = pool.imap_unordered(
+          fn=RunTest,
+          gen=gen_tests(),
+          process_context_fn=MakeProcessContext,
+          process_context_args=[self.context],
+      )
       for result in it:
-        test = test_map[result[0]]
+        if result.heartbeat:
+          self.indicator.Heartbeat()
+          continue
+        test = test_map[result.value[0]]
         if self.context.predictable:
-          update_perf = self._ProcessTestPredictable(test, result, pool)
+          update_perf = self._ProcessTestPredictable(test, result.value, pool)
         else:
-          update_perf = self._ProcessTestNormal(test, result, pool)
+          update_perf = self._ProcessTestNormal(test, result.value, pool)
         if update_perf:
           self._RunPerfSafe(lambda: self.perfdata.UpdatePerfData(test))
     finally:
+      self._VerbosePrint("Closing process pool.")
       pool.terminate()
+      self._VerbosePrint("Closing database connection.")
       self._RunPerfSafe(lambda: self.perf_data_manager.close())
       if self.perf_failures:
         # Nuke perf data in case of failures. This might not work on windows as
         # some files might still be open.
         print "Deleting perf test data due to db corruption."
         shutil.rmtree(self.datapath)
-    if queued_exception:
-      raise queued_exception
+    if queued_exception[0]:
+      raise queued_exception[0]
 
-    # Make sure that any allocations were printed in predictable mode.
-    assert not self.context.predictable or self.printed_allocations
+    # Make sure that any allocations were printed in predictable mode (if we
+    # ran any tests).
+    assert (
+        not self.total or
+        not self.context.predictable or
+        self.printed_allocations
+    )
 
-  def GetCommand(self, test):
-    d8testflag = []
-    shell = test.suite.shell()
-    if shell == "d8":
-      d8testflag = ["--test"]
-    if utils.IsWindows():
-      shell += ".exe"
-    cmd = (self.context.command_prefix +
-           [os.path.abspath(os.path.join(self.context.shell_dir, shell))] +
-           d8testflag +
-           ["--random-seed=%s" % self.context.random_seed] +
-           test.suite.GetFlagsForTestCase(test, self.context) +
-           self.context.extra_flags)
-    return cmd
+  def _VerbosePrint(self, text):
+    if self.context.verbose:
+      print text
+      sys.stdout.flush()
 
 
 class BreakNowException(Exception):
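
The core idea of the new execution model: TestJob instances carry only cheap, picklable data across the process boundary, while the heavyweight suite objects are rebuilt once per worker by MakeProcessContext and reattached on the worker side. A stripped-down sketch of that split, with hypothetical names, using the stock multiprocessing initializer hook in place of this runner's process_context_fn:

    import collections
    from multiprocessing import Pool

    ProcessContext = collections.namedtuple("ProcessContext", ["suites"])

    _CONTEXT = None  # process-local; set once per worker

    def init_worker():
        # Expensive per-process setup lives here, not in the pickled job.
        global _CONTEXT
        _CONTEXT = ProcessContext(suites={"mjsunit": object()})

    class TestJob(object):
        def __init__(self, test_name):
            self.test_name = test_name  # only cheap, picklable data

        def run(self):
            # Reattach process-local state on the worker side.
            suite = _CONTEXT.suites["mjsunit"]
            return (self.test_name, suite is not None)

    def run_job(job):
        return job.run()

    if __name__ == "__main__":
        pool = Pool(2, initializer=init_worker)
        print(pool.map(run_job, [TestJob("a"), TestJob("b")]))
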
diff --git a/tools/testrunner/local/perfdata.py b/tools/testrunner/local/perfdata.py
index 2979dc4..29ebff7 100644
--- a/tools/testrunner/local/perfdata.py
+++ b/tools/testrunner/local/perfdata.py
@@ -118,3 +118,29 @@
       if not mode in modes:
         modes[mode] = PerfDataStore(self.datadir, arch, mode)
       return modes[mode]
+
+
+class NullPerfDataStore(object):
+  def UpdatePerfData(self, test):
+    pass
+
+  def FetchPerfData(self, test):
+    return None
+
+
+class NullPerfDataManager(object):
+  def __init__(self):
+    pass
+
+  def GetStore(self, *args, **kwargs):
+    return NullPerfDataStore()
+
+  def close(self):
+    pass
+
+
+def GetPerfDataManager(context, datadir):
+  if context.use_perf_data:
+    return PerfDataManager(datadir)
+  else:
+    return NullPerfDataManager()
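
NullPerfDataStore/NullPerfDataManager are a textbook null-object pattern: call sites keep a single code path and the disabled case costs nothing. A generic, self-contained sketch of the idea (names are illustrative):

    class RealStore(object):
        def FetchPerfData(self, test):
            return 1.5  # a measured duration

    class NullStore(object):
        def FetchPerfData(self, test):
            return None  # same interface, no behavior

    def GetStore(use_perf_data):
        return RealStore() if use_perf_data else NullStore()

    # Callers never branch on whether perf data is enabled:
    duration = GetStore(False).FetchPerfData("some-test") or 1.0
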
diff --git a/tools/testrunner/local/pool.py b/tools/testrunner/local/pool.py
index 602a2d4..6d123fd 100644
--- a/tools/testrunner/local/pool.py
+++ b/tools/testrunner/local/pool.py
@@ -3,7 +3,10 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+from Queue import Empty
 from multiprocessing import Event, Process, Queue
+import traceback
+
 
 class NormalResult():
   def __init__(self, result):
@@ -24,17 +27,36 @@
     self.break_now = True
 
 
-def Worker(fn, work_queue, done_queue, done):
+class MaybeResult():
+  def __init__(self, heartbeat, value):
+    self.heartbeat = heartbeat
+    self.value = value
+
+  @staticmethod
+  def create_heartbeat():
+    return MaybeResult(True, None)
+
+  @staticmethod
+  def create_result(value):
+    return MaybeResult(False, value)
+
+
+def Worker(fn, work_queue, done_queue, done,
+           process_context_fn=None, process_context_args=None):
   """Worker to be run in a child process.
   The worker stops on two conditions: 1. when the poison pill "STOP" is
   read from the work queue, or 2. when the "done" event is set."""
   try:
+    kwargs = {}
+    if process_context_fn and process_context_args is not None:
+      kwargs.update(process_context=process_context_fn(*process_context_args))
     for args in iter(work_queue.get, "STOP"):
       if done.is_set():
         break
       try:
-        done_queue.put(NormalResult(fn(*args)))
+        done_queue.put(NormalResult(fn(*args, **kwargs)))
       except Exception, e:
+        traceback.print_exc()
         print(">>> EXCEPTION: %s" % e)
         done_queue.put(ExceptionResult())
   except KeyboardInterrupt:
@@ -51,7 +73,7 @@
   # Necessary to not overflow the queue's pipe if a keyboard interrupt happens.
   BUFFER_FACTOR = 4
 
-  def __init__(self, num_workers):
+  def __init__(self, num_workers, heartbeat_timeout=30):
     self.num_workers = num_workers
     self.processes = []
     self.terminated = False
@@ -67,11 +89,25 @@
     self.work_queue = Queue()
     self.done_queue = Queue()
     self.done = Event()
+    self.heartbeat_timeout = heartbeat_timeout
 
-  def imap_unordered(self, fn, gen):
+  def imap_unordered(self, fn, gen,
+                     process_context_fn=None, process_context_args=None):
     """Maps function "fn" to items in generator "gen" on the worker processes
     in an arbitrary order. The items are expected to be lists of arguments to
-    the function. Returns a results iterator."""
+    the function. Returns a results iterator. A result value of type
+    MaybeResult either indicates a heartbeat, i.e. that the runner is
+    still waiting for the next result to be computed, or wraps the real
+    result.
+
+    Args:
+      process_context_fn: Function executed once by each worker. Expected to
+          return a process-context object. If present, this object is passed
+          as additional argument to each call to fn.
+      process_context_args: List of arguments for the invocation of
+          process_context_fn. All arguments will be pickled and sent beyond the
+          process boundary.
+    """
     try:
       gen = iter(gen)
       self.advance = self._advance_more
@@ -80,13 +116,22 @@
         p = Process(target=Worker, args=(fn,
                                          self.work_queue,
                                          self.done_queue,
-                                         self.done))
+                                         self.done,
+                                         process_context_fn,
+                                         process_context_args))
         self.processes.append(p)
         p.start()
 
       self.advance(gen)
       while self.count > 0:
-        result = self.done_queue.get()
+        while True:
+          try:
+            result = self.done_queue.get(timeout=self.heartbeat_timeout)
+            break
+          except Empty:
+            # Indicate a heartbeat. The iterator will continue fetching the
+            # next result.
+            yield MaybeResult.create_heartbeat()
         self.count -= 1
         if result.exception:
           # Ignore items with unexpected exceptions.
@@ -95,7 +140,7 @@
           # A keyboard interrupt happened in one of the worker processes.
           raise KeyboardInterrupt
         else:
-          yield result.result
+          yield MaybeResult.create_result(result.result)
         self.advance(gen)
     finally:
       self.terminate()
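
The heartbeat mechanism boils down to polling the result queue with a timeout and yielding a sentinel while workers are still busy, so the consumer can print liveness updates. A condensed, runnable sketch (names are illustrative):

    from Queue import Empty           # 'queue' on Python 3
    from multiprocessing import Queue

    def results_with_heartbeat(done_queue, pending, heartbeat_timeout=30):
        while pending > 0:
            while True:
                try:
                    result = done_queue.get(timeout=heartbeat_timeout)
                    break
                except Empty:
                    yield ("heartbeat", None)  # still waiting
            pending -= 1
            yield ("result", result)

    if __name__ == "__main__":
        q = Queue()
        q.put(42)
        for kind, value in results_with_heartbeat(q, 1, heartbeat_timeout=1):
            print(kind, value)
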
diff --git a/tools/testrunner/local/pool_unittest.py b/tools/testrunner/local/pool_unittest.py
index bf2b3f8..335d20a 100644
--- a/tools/testrunner/local/pool_unittest.py
+++ b/tools/testrunner/local/pool_unittest.py
@@ -17,7 +17,7 @@
     results = set()
     pool = Pool(3)
     for result in pool.imap_unordered(Run, [[x] for x in range(0, 10)]):
-      results.add(result)
+      results.add(result.value)
     self.assertEquals(set(range(0, 10)), results)
 
   def testException(self):
@@ -25,7 +25,7 @@
     pool = Pool(3)
     for result in pool.imap_unordered(Run, [[x] for x in range(0, 12)]):
       # Item 10 will not appear in results due to an internal exception.
-      results.add(result)
+      results.add(result.value)
     expect = set(range(0, 12))
     expect.remove(10)
     self.assertEquals(expect, results)
@@ -34,8 +34,8 @@
     results = set()
     pool = Pool(3)
     for result in pool.imap_unordered(Run, [[x] for x in range(0, 10)]):
-      results.add(result)
-      if result < 30:
-        pool.add([result + 20])
+      results.add(result.value)
+      if result.value < 30:
+        pool.add([result.value + 20])
     self.assertEquals(set(range(0, 10) + range(20, 30) + range(40, 50)),
                       results)
diff --git a/tools/testrunner/local/progress.py b/tools/testrunner/local/progress.py
index 2616958..4e1be3e 100644
--- a/tools/testrunner/local/progress.py
+++ b/tools/testrunner/local/progress.py
@@ -26,34 +26,27 @@
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 
+from functools import wraps
 import json
 import os
 import sys
 import time
 
+from . import execution
 from . import junit_output
 
 
 ABS_PATH_PREFIX = os.getcwd() + os.sep
 
 
-def EscapeCommand(command):
-  parts = []
-  for part in command:
-    if ' ' in part:
-      # Escape spaces.  We may need to escape more characters for this
-      # to work properly.
-      parts.append('"%s"' % part)
-    else:
-      parts.append(part)
-  return " ".join(parts)
-
-
 class ProgressIndicator(object):
 
   def __init__(self):
     self.runner = None
 
+  def SetRunner(self, runner):
+    self.runner = runner
+
   def Starting(self):
     pass
 
@@ -66,6 +59,9 @@
   def HasRun(self, test, has_unexpected_output):
     pass
 
+  def Heartbeat(self):
+    pass
+
   def PrintFailureHeader(self, test):
     if test.suite.IsNegativeTest(test):
       negative_marker = '[negative] '
@@ -76,6 +72,42 @@
       'negative': negative_marker
     }
 
+  def _EscapeCommand(self, test):
+    command = execution.GetCommand(test, self.runner.context)
+    parts = []
+    for part in command:
+      if ' ' in part:
+        # Escape spaces.  We may need to escape more characters for this
+        # to work properly.
+        parts.append('"%s"' % part)
+      else:
+        parts.append(part)
+    return " ".join(parts)
+
+
+class IndicatorNotifier(object):
+  """Holds a list of progress indicators and notifies them all on events."""
+  def __init__(self):
+    self.indicators = []
+
+  def Register(self, indicator):
+    self.indicators.append(indicator)
+
+
+# Forge all generic event-dispatching methods in IndicatorNotifier, which are
+# part of the ProgressIndicator interface.
+for func_name in ProgressIndicator.__dict__:
+  func = getattr(ProgressIndicator, func_name)
+  if callable(func) and not func.__name__.startswith('_'):
+    def wrap_functor(f):
+      @wraps(f)
+      def functor(self, *args, **kwargs):
+        """Generic event dispatcher."""
+        for indicator in self.indicators:
+          getattr(indicator, f.__name__)(*args, **kwargs)
+      return functor
+    setattr(IndicatorNotifier, func_name, wrap_functor(func))
+
 
 class SimpleProgressIndicator(ProgressIndicator):
   """Abstract base class for {Verbose,Dots}ProgressIndicator"""
@@ -93,7 +125,7 @@
       if failed.output.stdout:
         print "--- stdout ---"
         print failed.output.stdout.strip()
-      print "Command: %s" % EscapeCommand(self.runner.GetCommand(failed))
+      print "Command: %s" % self._EscapeCommand(failed)
       if failed.output.HasCrashed():
         print "exit code: %d" % failed.output.exit_code
         print "--- CRASHED ---"
@@ -127,6 +159,11 @@
     else:
       outcome = 'pass'
     print 'Done running %s: %s' % (test.GetLabel(), outcome)
+    sys.stdout.flush()
+
+  def Heartbeat(self):
+    print 'Still working...'
+    sys.stdout.flush()
 
 
 class DotsProgressIndicator(SimpleProgressIndicator):
@@ -176,7 +213,7 @@
       stderr = test.output.stderr.strip()
       if len(stderr):
         print self.templates['stderr'] % stderr
-      print "Command: %s" % EscapeCommand(self.runner.GetCommand(test))
+      print "Command: %s" % self._EscapeCommand(test)
       if test.output.HasCrashed():
         print "exit code: %d" % test.output.exit_code
         print "--- CRASHED ---"
@@ -192,10 +229,12 @@
   def PrintProgress(self, name):
     self.ClearLine(self.last_status_length)
     elapsed = time.time() - self.start_time
+    progress = 0 if not self.runner.total else (
+        ((self.runner.total - self.runner.remaining) * 100) //
+          self.runner.total)
     status = self.templates['status_line'] % {
       'passed': self.runner.succeeded,
-      'remaining': (((self.runner.total - self.runner.remaining) * 100) //
-                    self.runner.total),
+      'progress': progress,
       'failed': len(self.runner.failed),
       'test': name,
       'mins': int(elapsed) / 60,
@@ -212,7 +251,7 @@
   def __init__(self):
     templates = {
       'status_line': ("[%(mins)02i:%(secs)02i|"
-                      "\033[34m%%%(remaining) 4d\033[0m|"
+                      "\033[34m%%%(progress) 4d\033[0m|"
                       "\033[32m+%(passed) 4d\033[0m|"
                       "\033[31m-%(failed) 4d\033[0m]: %(test)s"),
       'stdout': "\033[1m%s\033[0m",
@@ -228,7 +267,7 @@
 
   def __init__(self):
     templates = {
-      'status_line': ("[%(mins)02i:%(secs)02i|%%%(remaining) 4d|"
+      'status_line': ("[%(mins)02i:%(secs)02i|%%%(progress) 4d|"
                       "+%(passed) 4d|-%(failed) 4d]: %(test)s"),
       'stdout': '%s',
       'stderr': '%s',
@@ -241,29 +280,19 @@
 
 class JUnitTestProgressIndicator(ProgressIndicator):
 
-  def __init__(self, progress_indicator, junitout, junittestsuite):
-    self.progress_indicator = progress_indicator
+  def __init__(self, junitout, junittestsuite):
     self.outputter = junit_output.JUnitTestOutput(junittestsuite)
     if junitout:
       self.outfile = open(junitout, "w")
     else:
       self.outfile = sys.stdout
 
-  def Starting(self):
-    self.progress_indicator.runner = self.runner
-    self.progress_indicator.Starting()
-
   def Done(self):
-    self.progress_indicator.Done()
     self.outputter.FinishAndWrite(self.outfile)
     if self.outfile != sys.stdout:
       self.outfile.close()
 
-  def AboutToRun(self, test):
-    self.progress_indicator.AboutToRun(test)
-
   def HasRun(self, test, has_unexpected_output):
-    self.progress_indicator.HasRun(test, has_unexpected_output)
     fail_text = ""
     if has_unexpected_output:
       stdout = test.output.stdout.strip()
@@ -272,7 +301,7 @@
       stderr = test.output.stderr.strip()
       if len(stderr):
         fail_text += "stderr:\n%s\n" % stderr
-      fail_text += "Command: %s" % EscapeCommand(self.runner.GetCommand(test))
+      fail_text += "Command: %s" % self._EscapeCommand(test)
       if test.output.HasCrashed():
         fail_text += "exit code: %d\n--- CRASHED ---" % test.output.exit_code
       if test.output.HasTimedOut():
@@ -285,39 +314,46 @@
 
 class JsonTestProgressIndicator(ProgressIndicator):
 
-  def __init__(self, progress_indicator, json_test_results, arch, mode):
-    self.progress_indicator = progress_indicator
+  def __init__(self, json_test_results, arch, mode, random_seed):
     self.json_test_results = json_test_results
     self.arch = arch
     self.mode = mode
+    self.random_seed = random_seed
     self.results = []
-
-  def Starting(self):
-    self.progress_indicator.runner = self.runner
-    self.progress_indicator.Starting()
+    self.tests = []
 
   def Done(self):
-    self.progress_indicator.Done()
     complete_results = []
     if os.path.exists(self.json_test_results):
       with open(self.json_test_results, "r") as f:
         # Buildbot might start out with an empty file.
         complete_results = json.loads(f.read() or "[]")
 
+    # Sort tests by duration.
+    timed_tests = [t for t in self.tests if t.duration is not None]
+    timed_tests.sort(lambda a, b: cmp(b.duration, a.duration))
+    slowest_tests = [
+      {
+        "name": test.GetLabel(),
+        "flags": test.flags,
+        "command": self._EscapeCommand(test).replace(ABS_PATH_PREFIX, ""),
+        "duration": test.duration,
+      } for test in timed_tests[:20]
+    ]
+
     complete_results.append({
       "arch": self.arch,
       "mode": self.mode,
       "results": self.results,
+      "slowest_tests": slowest_tests,
     })
 
     with open(self.json_test_results, "w") as f:
       f.write(json.dumps(complete_results))
 
-  def AboutToRun(self, test):
-    self.progress_indicator.AboutToRun(test)
-
   def HasRun(self, test, has_unexpected_output):
-    self.progress_indicator.HasRun(test, has_unexpected_output)
+    # Buffer all tests for sorting the durations in the end.
+    self.tests.append(test)
     if not has_unexpected_output:
       # Omit tests that run as expected. Passing tests of reruns after failures
       # will have unexpected_output reported here as well.
@@ -326,14 +362,20 @@
     self.results.append({
       "name": test.GetLabel(),
       "flags": test.flags,
-      "command": EscapeCommand(self.runner.GetCommand(test)).replace(
-          ABS_PATH_PREFIX, ""),
+      "command": self._EscapeCommand(test).replace(ABS_PATH_PREFIX, ""),
       "run": test.run,
       "stdout": test.output.stdout,
       "stderr": test.output.stderr,
       "exit_code": test.output.exit_code,
       "result": test.suite.GetOutcome(test),
       "expected": list(test.outcomes or ["PASS"]),
+      "duration": test.duration,
+
+      # TODO(machenbach): This stores only the global random seed from the
+      # context and not possible overrides when using random-seed stress.
+      "random_seed": self.random_seed,
+      "target_name": test.suite.shell(),
+      "variant": test.variant,
     })
 
 
diff --git a/tools/testrunner/local/statusfile.py b/tools/testrunner/local/statusfile.py
index a313f05..f86106b 100644
--- a/tools/testrunner/local/statusfile.py
+++ b/tools/testrunner/local/statusfile.py
@@ -25,6 +25,7 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+import os
 
 # These outcomes can occur in a TestCase's outcomes list:
 SKIP = "SKIP"
@@ -40,12 +41,13 @@
 # These are just for the status files and are mapped below in DEFS:
 FAIL_OK = "FAIL_OK"
 PASS_OR_FAIL = "PASS_OR_FAIL"
+FAIL_SLOPPY = "FAIL_SLOPPY"
 
 ALWAYS = "ALWAYS"
 
 KEYWORDS = {}
 for key in [SKIP, FAIL, PASS, OKAY, TIMEOUT, CRASH, SLOW, FLAKY, FAIL_OK,
-            FAST_VARIANTS, NO_VARIANTS, PASS_OR_FAIL, ALWAYS]:
+            FAST_VARIANTS, NO_VARIANTS, PASS_OR_FAIL, FAIL_SLOPPY, ALWAYS]:
   KEYWORDS[key] = key
 
 DEFS = {FAIL_OK: [FAIL, OKAY],
@@ -53,9 +55,11 @@
 
 # Support arches, modes to be written as keywords instead of strings.
 VARIABLES = {ALWAYS: True}
-for var in ["debug", "release", "android_arm", "android_arm64", "android_ia32", "android_x87",
-            "arm", "arm64", "ia32", "mips", "mipsel", "mips64el", "x64", "x87", "nacl_ia32",
-            "nacl_x64", "macos", "windows", "linux"]:
+for var in ["debug", "release", "big", "little",
+            "android_arm", "android_arm64", "android_ia32", "android_x87",
+            "android_x64", "arm", "arm64", "ia32", "mips", "mipsel", "mips64",
+            "mips64el", "x64", "x87", "nacl_ia32", "nacl_x64", "ppc", "ppc64",
+            "macos", "windows", "linux", "aix"]:
   VARIABLES[var] = var
 
 
@@ -104,7 +108,7 @@
 def _ParseOutcomeList(rule, outcomes, target_dict, variables):
   result = set([])
   if type(outcomes) == str:
-   outcomes = [outcomes]
+    outcomes = [outcomes]
   for item in outcomes:
     if type(item) == str:
       _AddOutcome(result, item)
@@ -122,10 +126,14 @@
     target_dict[rule] = result
 
 
-def ReadStatusFile(path, variables):
+def ReadContent(path):
   with open(path) as f:
     global KEYWORDS
-    contents = eval(f.read(), KEYWORDS)
+    return eval(f.read(), KEYWORDS)
+
+
+def ReadStatusFile(path, variables):
+  contents = ReadContent(path)
 
   rules = {}
   wildcards = {}
@@ -143,3 +151,30 @@
       else:
         _ParseOutcomeList(rule, section[rule], rules, variables)
   return rules, wildcards
+
+
+def PresubmitCheck(path):
+  contents = ReadContent(path)
+  root_prefix = os.path.basename(os.path.dirname(path)) + "/"
+  status = {"success": True}
+  def _assert(check, message):  # Like "assert", but doesn't throw.
+    if not check:
+      print("%s: Error: %s" % (path, message))
+      status["success"] = False
+  try:
+    for section in contents:
+      _assert(type(section) == list, "Section must be a list")
+      _assert(len(section) == 2, "Section list must have exactly 2 entries")
+      section = section[1]
+      _assert(type(section) == dict,
+              "Second entry of section must be a dictionary")
+      for rule in section:
+        _assert(type(rule) == str, "Rule key must be a string")
+        _assert(not rule.startswith(root_prefix),
+                "Suite name prefix must not be used in rule keys")
+        _assert(not rule.endswith('.js'),
+                ".js extension must not be used in rule keys.")
+    return status["success"]
+  except Exception as e:
+    print e
+    return False
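
For reference, the shape PresubmitCheck validates: a status file is a list of two-entry sections, [condition, rules-dict], where rule keys omit the suite prefix and any ".js" extension. A hypothetical example (the keyword names stand in for the constants injected via KEYWORDS, and the rule names and condition are made up):

    ALWAYS, SLOW, SKIP = "ALWAYS", "SLOW", "SKIP"

    contents = [
      [ALWAYS, {
        "regress/some-slow-test": [SLOW],   # ok: no suite prefix
        "bugs/known-crash": [SKIP],         # ok: no ".js" extension
      }],
      ["mode == debug", {                   # condition string, evaluated later
        "heavy-test": [SKIP],
      }],
    ]
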
diff --git a/tools/testrunner/local/testsuite.py b/tools/testrunner/local/testsuite.py
index 84f07fe..e3d1e23 100644
--- a/tools/testrunner/local/testsuite.py
+++ b/tools/testrunner/local/testsuite.py
@@ -35,27 +35,67 @@
 from ..objects import testcase
 
 # Use this to run several variants of the tests.
-VARIANT_FLAGS = {
-    "default": [],
-    "stress": ["--stress-opt", "--always-opt"],
-    "turbofan": ["--turbo-asm", "--turbo-filter=*", "--always-opt"],
-    "nocrankshaft": ["--nocrankshaft"]}
+ALL_VARIANT_FLAGS = {
+  "default": [[]],
+  "stress": [["--stress-opt", "--always-opt"]],
+  "turbofan": [["--turbo"]],
+  "turbofan_opt": [["--turbo", "--always-opt"]],
+  "nocrankshaft": [["--nocrankshaft"]],
+  "ignition": [["--ignition", "--turbo", "--ignition-fake-try-catch",
+                "--ignition-fallback-on-eval-and-catch"]],
+  "preparser": [["--min-preparse-length=0"]],
+}
 
-FAST_VARIANT_FLAGS = [
-    f for v, f in VARIANT_FLAGS.iteritems() if v in ["default", "turbofan"]
-]
+# FAST_VARIANTS implies no --always-opt.
+FAST_VARIANT_FLAGS = {
+  "default": [[]],
+  "stress": [["--stress-opt"]],
+  "turbofan": [["--turbo"]],
+  "nocrankshaft": [["--nocrankshaft"]],
+  "ignition": [["--ignition", "--turbo", "--ignition-fake-try-catch",
+                "--ignition-fallback-on-eval-and-catch"]],
+  "preparser": [["--min-preparse-length=0"]],
+}
+
+ALL_VARIANTS = set(["default", "stress", "turbofan", "turbofan_opt",
+                    "nocrankshaft", "ignition", "preparser"])
+FAST_VARIANTS = set(["default", "turbofan"])
+STANDARD_VARIANT = set(["default"])
+
+
+class VariantGenerator(object):
+  def __init__(self, suite, variants):
+    self.suite = suite
+    self.all_variants = ALL_VARIANTS & variants
+    self.fast_variants = FAST_VARIANTS & variants
+    self.standard_variant = STANDARD_VARIANT & variants
+
+  def FilterVariantsByTest(self, testcase):
+    if testcase.outcomes and statusfile.OnlyStandardVariant(
+        testcase.outcomes):
+      return self.standard_variant
+    if testcase.outcomes and statusfile.OnlyFastVariants(testcase.outcomes):
+      return self.fast_variants
+    return self.all_variants
+
+  def GetFlagSets(self, testcase, variant):
+    if testcase.outcomes and statusfile.OnlyFastVariants(testcase.outcomes):
+      return FAST_VARIANT_FLAGS[variant]
+    else:
+      return ALL_VARIANT_FLAGS[variant]
+
 
 class TestSuite(object):
 
   @staticmethod
-  def LoadTestSuite(root):
+  def LoadTestSuite(root, global_init=True):
     name = root.split(os.path.sep)[-1]
     f = None
     try:
       (f, pathname, description) = imp.find_module("testcfg", [root])
       module = imp.load_module("testcfg", f, pathname, description)
       return module.GetSuite(name, root)
-    except:
+    except ImportError:
       # Use default if no testcfg is present.
       return GoogleTestSuite(name, root)
     finally:
@@ -63,6 +103,8 @@
         f.close()
 
   def __init__(self, name, root):
+    # Note: This might be called concurrently from different processes.
+    # Changing hard-disk state should be done in 'SetupWorkingDirectory' below.
     self.name = name  # string
     self.root = root  # string containing path
     self.tests = None  # list of TestCase objects
@@ -70,6 +112,11 @@
     self.wildcards = None  # dictionary mapping test paths to list of outcomes
     self.total_duration = None  # float, assigned on demand
 
+  def SetupWorkingDirectory(self):
+    # This is called once per test suite object in a multi-process setting.
+    # Multi-process-unsafe work-directory setup can go here.
+    pass
+
   def shell(self):
     return "d8"
 
@@ -89,12 +136,19 @@
   def ListTests(self, context):
     raise NotImplementedError
 
-  def VariantFlags(self, testcase, default_flags):
-    if testcase.outcomes and statusfile.OnlyStandardVariant(testcase.outcomes):
-      return [[]]
-    if testcase.outcomes and statusfile.OnlyFastVariants(testcase.outcomes):
-      return filter(lambda flags: flags in FAST_VARIANT_FLAGS, default_flags)
-    return default_flags
+  def _VariantGeneratorFactory(self):
+    """The variant generator class to be used."""
+    return VariantGenerator
+
+  def CreateVariantGenerator(self, variants):
+    """Return a generator for the testing variants of this suite.
+
+    Args:
+      variants: List of variant names to be run as specified by the test
+                runner.
+    Returns: An object of type VariantGenerator.
+    """
+    return self._VariantGeneratorFactory()(self, set(variants))
 
   def DownloadData(self):
     pass
@@ -147,7 +201,7 @@
         assert rule[-1] == '*'
         if testname.startswith(rule[:-1]):
           used_rules.add(rule)
-          t.outcomes = self.wildcards[rule]
+          t.outcomes |= self.wildcards[rule]
           if statusfile.DoSkip(t.outcomes):
             skip = True
             break  # "for rule in self.wildcards"
@@ -172,23 +226,36 @@
         print("Unused rule: %s -> %s" % (rule, self.wildcards[rule]))
 
   def FilterTestCasesByArgs(self, args):
+    """Filter test cases based on command-line arguments.
+
+    An argument with an asterisk at the end will match all test cases
+    that have the argument as a prefix. Without an asterisk, only exact
+    matches are used, with the exception of the test-suite name as argument.
+    """
     filtered = []
-    filtered_args = []
+    globs = []
+    exact_matches = []
     for a in args:
-      argpath = a.split(os.path.sep)
+      argpath = a.split('/')
       if argpath[0] != self.name:
         continue
       if len(argpath) == 1 or (len(argpath) == 2 and argpath[1] == '*'):
         return  # Don't filter, run all tests in this suite.
-      path = os.path.sep.join(argpath[1:])
+      path = '/'.join(argpath[1:])
       if path[-1] == '*':
         path = path[:-1]
-      filtered_args.append(path)
+        globs.append(path)
+      else:
+        exact_matches.append(path)
     for t in self.tests:
-      for a in filtered_args:
+      for a in globs:
         if t.path.startswith(a):
           filtered.append(t)
           break
+      for a in exact_matches:
+        if t.path == a:
+          filtered.append(t)
+          break
     self.tests = filtered
 
   def GetFlagsForTestCase(self, testcase, context):
@@ -236,6 +303,11 @@
     return self.total_duration
 
 
+class StandardVariantGenerator(VariantGenerator):
+  def FilterVariantsByTest(self, testcase):
+    return self.standard_variant
+
+
 class GoogleTestSuite(TestSuite):
   def __init__(self, name, root):
     super(GoogleTestSuite, self).__init__(name, root)
@@ -269,5 +341,8 @@
             ["--gtest_print_time=0"] +
             context.mode_flags)
 
+  def _VariantGeneratorFactory(self):
+    return StandardVariantGenerator
+
   def shell(self):
     return self.name
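
Variant selection now intersects what the runner requests with what the suite supports, then expands each surviving variant into one or more flag sets. A condensed sketch of that flow, reusing a slice of the tables above:

    ALL_VARIANT_FLAGS = {
      "default": [[]],
      "stress": [["--stress-opt", "--always-opt"]],
      "turbofan": [["--turbo"]],
    }
    ALL_VARIANTS = set(ALL_VARIANT_FLAGS)

    def expand(requested):
        # One (variant, flags) pair per flag set; the runner turns each
        # pair into a TestCase copy via CopyAddingFlags.
        for variant in ALL_VARIANTS & set(requested):
            for flags in ALL_VARIANT_FLAGS[variant]:
                yield variant, flags

    if __name__ == "__main__":
        for variant, flags in expand(["default", "stress", "ignition"]):
            print(variant, flags)
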
diff --git a/tools/testrunner/local/utils.py b/tools/testrunner/local/utils.py
index 7bc21b1..cb6c350 100644
--- a/tools/testrunner/local/utils.py
+++ b/tools/testrunner/local/utils.py
@@ -32,6 +32,7 @@
 from os.path import join
 import platform
 import re
+import subprocess
 import urllib2
 
 
@@ -73,6 +74,8 @@
     return 'solaris'
   elif system == 'NetBSD':
     return 'netbsd'
+  elif system == 'AIX':
+    return 'aix'
   else:
     return None
 
@@ -99,6 +102,8 @@
     return 'ia32'
   elif machine == 'amd64':
     return 'ia32'
+  elif machine == 'ppc64':
+    return 'ppc'
   else:
     return None
 
@@ -117,5 +122,15 @@
 def URLRetrieve(source, destination):
   """urllib is broken for SSL connections via a proxy therefore we
   can't use urllib.urlretrieve()."""
+  if IsWindows():
+    try:
+      # In python 2.7.6 on windows, urlopen has a problem with redirects.
+      # Try using curl instead. Note, this is fixed in 2.7.8.
+      subprocess.check_call(["curl", source, '-k', '-L', '-o', destination])
+      return
+    except:
+      # If there's no curl, fall back to urlopen.
+      print "Curl is currently not installed. Falling back to python."
+      pass
   with open(destination, 'w') as f:
     f.write(urllib2.urlopen(source).read())
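
The Windows branch above prefers shelling out to curl (dodging a urllib2 redirect bug in Python 2.7.6) and falls back to urlopen only when that fails. A compact sketch of the try-external-tool-then-stdlib idiom, assuming Python 2 (urllib2 became urllib.request in Python 3):

    import subprocess
    import urllib2

    def retrieve(source, destination):
        try:
            subprocess.check_call(["curl", source, "-L", "-o", destination])
            return
        except OSError:
            pass  # curl not installed; fall back to the stdlib
        with open(destination, "w") as f:
            f.write(urllib2.urlopen(source).read())
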
diff --git a/tools/testrunner/network/endpoint.py b/tools/testrunner/network/endpoint.py
index d0950cf..516578a 100644
--- a/tools/testrunner/network/endpoint.py
+++ b/tools/testrunner/network/endpoint.py
@@ -93,6 +93,7 @@
     suite = testsuite.TestSuite.LoadTestSuite(
         os.path.join(workspace, "test", root))
     if suite:
+      suite.SetupWorkingDirectory()
       suites.append(suite)
 
   suites_dict = {}
diff --git a/tools/testrunner/network/network_execution.py b/tools/testrunner/network/network_execution.py
index a43a6cf..c842aba 100644
--- a/tools/testrunner/network/network_execution.py
+++ b/tools/testrunner/network/network_execution.py
@@ -52,7 +52,6 @@
 class NetworkedRunner(execution.Runner):
   def __init__(self, suites, progress_indicator, context, peers, workspace):
     self.suites = suites
-    num_tests = 0
     datapath = os.path.join("out", "testrunner_data")
     # TODO(machenbach): These fields should exist now in the superclass.
     # But there is no super constructor call. Check if this is a problem.
@@ -61,8 +60,7 @@
     for s in suites:
       for t in s.tests:
         t.duration = self.perfdata.FetchPerfData(t) or 1.0
-      num_tests += len(s.tests)
-    self._CommonInit(num_tests, progress_indicator, context)
+    self._CommonInit(suites, progress_indicator, context)
     self.tests = []  # Only used if we need to fall back to local execution.
     self.tests_lock = threading.Lock()
     self.peers = peers
diff --git a/tools/testrunner/objects/context.py b/tools/testrunner/objects/context.py
index 937d908..c9853d0 100644
--- a/tools/testrunner/objects/context.py
+++ b/tools/testrunner/objects/context.py
@@ -30,7 +30,7 @@
   def __init__(self, arch, mode, shell_dir, mode_flags, verbose, timeout,
                isolates, command_prefix, extra_flags, noi18n, random_seed,
                no_sorting, rerun_failures_count, rerun_failures_max,
-               predictable):
+               predictable, no_harness, use_perf_data):
     self.arch = arch
     self.mode = mode
     self.shell_dir = shell_dir
@@ -46,16 +46,20 @@
     self.rerun_failures_count = rerun_failures_count
     self.rerun_failures_max = rerun_failures_max
     self.predictable = predictable
+    self.no_harness = no_harness
+    self.use_perf_data = use_perf_data
 
   def Pack(self):
     return [self.arch, self.mode, self.mode_flags, self.timeout, self.isolates,
             self.command_prefix, self.extra_flags, self.noi18n,
             self.random_seed, self.no_sorting, self.rerun_failures_count,
-            self.rerun_failures_max, self.predictable]
+            self.rerun_failures_max, self.predictable, self.no_harness,
+            self.use_perf_data]
 
   @staticmethod
   def Unpack(packed):
     # For the order of the fields, refer to Pack() above.
     return Context(packed[0], packed[1], None, packed[2], False,
                    packed[3], packed[4], packed[5], packed[6], packed[7],
-                   packed[8], packed[9], packed[10], packed[11], packed[12])
+                   packed[8], packed[9], packed[10], packed[11], packed[12],
+                   packed[13], packed[14])
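
Context crosses the network boundary as a positional list, so Pack and Unpack must stay in lockstep; note that the two new fields are appended at the end, so existing indices keep their meaning. A generic sketch of the idiom (Config is an illustrative stand-in):

    class Config(object):
        def __init__(self, arch, mode, timeout):
            self.arch, self.mode, self.timeout = arch, mode, timeout

        def pack(self):
            # Order matters: unpack() indexes positionally, so new
            # fields must only ever be appended.
            return [self.arch, self.mode, self.timeout]

        @staticmethod
        def unpack(packed):
            return Config(packed[0], packed[1], packed[2])

    assert Config.unpack(Config("x64", "release", 60).pack()).arch == "x64"
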
diff --git a/tools/testrunner/objects/testcase.py b/tools/testrunner/objects/testcase.py
index 6c55082..fa2265c 100644
--- a/tools/testrunner/objects/testcase.py
+++ b/tools/testrunner/objects/testcase.py
@@ -29,19 +29,22 @@
 from . import output
 
 class TestCase(object):
-  def __init__(self, suite, path, flags=None, dependency=None):
+  def __init__(self, suite, path, variant='default', flags=None,
+               dependency=None):
     self.suite = suite        # TestSuite object
     self.path = path          # string, e.g. 'div-mod', 'test-api/foo'
     self.flags = flags or []  # list of strings, flags specific to this test
+    self.variant = variant    # name of the used testing variant
     self.dependency = dependency  # |path| for testcase that must be run first
-    self.outcomes = None
+    self.outcomes = set([])
     self.output = None
     self.id = None  # int, used to map result back to TestCase instance
     self.duration = None  # assigned during execution
     self.run = 1  # The nth time this test is executed.
 
-  def CopyAddingFlags(self, flags):
-    copy = TestCase(self.suite, self.path, self.flags + flags, self.dependency)
+  def CopyAddingFlags(self, variant, flags):
+    copy = TestCase(self.suite, self.path, variant, self.flags + flags,
+                    self.dependency)
     copy.outcomes = self.outcomes
     return copy
 
@@ -51,16 +54,16 @@
     and returns them as a JSON serializable object.
     """
     assert self.id is not None
-    return [self.suitename(), self.path, self.flags,
+    return [self.suitename(), self.path, self.variant, self.flags,
             self.dependency, list(self.outcomes or []), self.id]
 
   @staticmethod
   def UnpackTask(task):
     """Creates a new TestCase object based on packed task data."""
     # For the order of the fields, refer to PackTask() above.
-    test = TestCase(str(task[0]), task[1], task[2], task[3])
-    test.outcomes = set(task[4])
-    test.id = task[5]
+    test = TestCase(str(task[0]), task[1], task[2], task[3], task[4])
+    test.outcomes = set(task[5])
+    test.id = task[6]
     test.run = 1
     return test
 
@@ -83,3 +86,11 @@
 
   def GetLabel(self):
     return self.suitename() + "/" + self.suite.CommonTestName(self)
+
+  def __getstate__(self):
+    """Representation to pickle test cases.
+
+    The original suite won't be sent beyond process boundaries. Instead
+    send the name only and retrieve a process-local suite later.
+    """
+    return dict(self.__dict__, suite=self.suite.name)
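
__getstate__ is what makes TestJob pickling cheap: the suite attribute is swapped for its name before a test case crosses the process boundary, and the worker swaps a process-local suite object back in (see SetSuiteObject in execution.py above). A runnable sketch of both halves, under simplified assumptions:

    import pickle

    class Suite(object):
        def __init__(self, name):
            self.name = name

    class TestCase(object):
        def __init__(self, suite, path):
            self.suite = suite
            self.path = path

        def __getstate__(self):
            # Send only the suite's name across the process boundary.
            return dict(self.__dict__, suite=self.suite.name)

        def SetSuiteObject(self, suites_by_name):
            # Worker side: reattach the process-local suite object.
            self.suite = suites_by_name[self.suite]

    if __name__ == "__main__":
        t = pickle.loads(pickle.dumps(TestCase(Suite("mjsunit"), "foo")))
        assert t.suite == "mjsunit"  # only the name survived pickling
        t.SetSuiteObject({"mjsunit": Suite("mjsunit")})
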
diff --git a/tools/testrunner/testrunner.isolate b/tools/testrunner/testrunner.isolate
new file mode 100644
index 0000000..669614b
--- /dev/null
+++ b/tools/testrunner/testrunner.isolate
@@ -0,0 +1,14 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+  'variables': {
+    'command': [
+      '../run-tests.py',
+    ],
+    'files': [
+      '../run-tests.py',
+      './'
+    ],
+  },
+}
\ No newline at end of file