Upgrade V8 to version 4.9.385.28

https://chromium.googlesource.com/v8/v8/+/4.9.385.28

FPIIM-449

Change-Id: I4b2e74289d4bf3667f2f3dc8aa2e541f63e26eb4
diff --git a/tools/run-tests.py b/tools/run-tests.py
index d68d1f8..fe8091e 100755
--- a/tools/run-tests.py
+++ b/tools/run-tests.py
@@ -44,36 +44,48 @@
 from testrunner.local import execution
 from testrunner.local import progress
 from testrunner.local import testsuite
-from testrunner.local.testsuite import VARIANT_FLAGS
+from testrunner.local.testsuite import ALL_VARIANTS
 from testrunner.local import utils
 from testrunner.local import verbose
 from testrunner.network import network_execution
 from testrunner.objects import context
 
 
+# Base dir of the v8 checkout to be used as cwd.
+BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+
 ARCH_GUESS = utils.DefaultArch()
-DEFAULT_TESTS = [
-  "mjsunit",
-  "unittests",
-  "cctest",
-  "message",
-  "preparser",
-]
 
 # Map of test name synonyms to lists of test suites. Should be ordered by
 # expected runtimes (suites with slow test cases first). These groups are
 # invoked in separate steps on the bots.
 TEST_MAP = {
+  "bot_default": [
+    "mjsunit",
+    "cctest",
+    "webkit",
+    "message",
+    "preparser",
+    "intl",
+    "unittests",
+  ],
   "default": [
     "mjsunit",
     "cctest",
     "message",
     "preparser",
+    "intl",
+    "unittests",
+  ],
+  "ignition": [
+    "mjsunit",
+    "cctest",
   ],
   "optimize_for_size": [
     "mjsunit",
     "cctest",
     "webkit",
+    "intl",
   ],
   "unittests": [
     "unittests",
@@ -81,17 +93,61 @@
 }
 
 TIMEOUT_DEFAULT = 60
-TIMEOUT_SCALEFACTOR = {"debug"   : 4,
-                       "release" : 1 }
 
-VARIANTS = ["default", "stress", "turbofan", "nocrankshaft"]
+VARIANTS = ["default", "stress", "turbofan"]
 
-MODE_FLAGS = {
-    "debug"   : ["--nohard-abort", "--nodead-code-elimination",
-                 "--nofold-constants", "--enable-slow-asserts",
-                 "--debug-code", "--verify-heap"],
-    "release" : ["--nohard-abort", "--nodead-code-elimination",
-                 "--nofold-constants"]}
+EXHAUSTIVE_VARIANTS = VARIANTS + [
+  "nocrankshaft",
+  "turbofan_opt",
+]
+
+DEBUG_FLAGS = ["--nohard-abort", "--nodead-code-elimination",
+               "--nofold-constants", "--enable-slow-asserts",
+               "--debug-code", "--verify-heap"]
+RELEASE_FLAGS = ["--nohard-abort", "--nodead-code-elimination",
+                 "--nofold-constants"]
+
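+# Note: "status_mode" decides which status-file expectations and timeouts
+# apply, while "execution_mode" is what the test runner actually executes.
+# The two differ for the trybot and slowrelease configurations below.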
+MODES = {
+  "debug": {
+    "flags": DEBUG_FLAGS,
+    "timeout_scalefactor": 4,
+    "status_mode": "debug",
+    "execution_mode": "debug",
+    "output_folder": "debug",
+  },
+  "optdebug": {
+    "flags": DEBUG_FLAGS,
+    "timeout_scalefactor": 4,
+    "status_mode": "debug",
+    "execution_mode": "debug",
+    "output_folder": "optdebug",
+  },
+  "release": {
+    "flags": RELEASE_FLAGS,
+    "timeout_scalefactor": 1,
+    "status_mode": "release",
+    "execution_mode": "release",
+    "output_folder": "release",
+  },
+  # Normal trybot release configuration. There, dchecks are always on, which
+  # implies debug-like behavior. Hence, the status file needs to assume
+  # debug-like behavior/timeouts.
+  "tryrelease": {
+    "flags": RELEASE_FLAGS,
+    "timeout_scalefactor": 1,
+    "status_mode": "debug",
+    "execution_mode": "release",
+    "output_folder": "release",
+  },
+  # This mode requires v8 to be compiled with dchecks and slow dchecks.
+  "slowrelease": {
+    "flags": RELEASE_FLAGS + ["--enable-slow-asserts"],
+    "timeout_scalefactor": 2,
+    "status_mode": "debug",
+    "execution_mode": "release",
+    "output_folder": "release",
+  },
+}
 
 GC_STRESS_FLAGS = ["--gc-interval=500", "--stress-compaction",
                    "--concurrent-recompilation-queue-length=64",
@@ -101,14 +157,18 @@
 SUPPORTED_ARCHS = ["android_arm",
                    "android_arm64",
                    "android_ia32",
+                   "android_x64",
                    "arm",
                    "ia32",
                    "x87",
                    "mips",
                    "mipsel",
+                   "mips64",
                    "mips64el",
                    "nacl_ia32",
                    "nacl_x64",
+                   "ppc",
+                   "ppc64",
                    "x64",
                    "x32",
                    "arm64"]
@@ -116,9 +176,11 @@
 SLOW_ARCHS = ["android_arm",
               "android_arm64",
               "android_ia32",
+              "android_x64",
               "arm",
               "mips",
               "mipsel",
+              "mips64",
               "mips64el",
               "nacl_ia32",
               "nacl_x64",
@@ -128,9 +190,11 @@
 
 def BuildOptions():
   result = optparse.OptionParser()
+  result.usage = '%prog [options] [tests]'
+  result.description = """TESTS: %s""" % (TEST_MAP["default"])
   result.add_option("--arch",
                     help=("The architecture to run tests for, "
-                          "'auto' or 'native' for auto-detect"),
+                          "'auto' or 'native' for auto-detect: %s" % SUPPORTED_ARCHS),
                     default="ia32,x64,arm")
   result.add_option("--arch-and-mode",
                     help="Architecture and mode in the format 'arch.mode'",
@@ -138,12 +202,18 @@
   result.add_option("--asan",
                     help="Regard test expectations for ASAN",
                     default=False, action="store_true")
+  result.add_option("--cfi-vptr",
+                    help="Run tests with UBSAN cfi_vptr option.",
+                    default=False, action="store_true")
   result.add_option("--buildbot",
                     help="Adapt to path structure used on buildbots",
                     default=False, action="store_true")
   result.add_option("--dcheck-always-on",
                     help="Indicates that V8 was compiled with DCHECKs enabled",
                     default=False, action="store_true")
+  result.add_option("--novfp3",
+                    help="Indicates that V8 was compiled without VFP3 support",
+                    default=False, action="store_true")
   result.add_option("--cat", help="Print the source of the tests",
                     default=False, action="store_true")
   result.add_option("--flaky-tests",
@@ -158,21 +228,33 @@
   result.add_option("--gc-stress",
                     help="Switch on GC stress mode",
                     default=False, action="store_true")
+  result.add_option("--gcov-coverage",
+                    help="Uses executables instrumented for gcov coverage",
+                    default=False, action="store_true")
   result.add_option("--command-prefix",
                     help="Prepended to each shell command used to run a test",
                     default="")
   result.add_option("--download-data", help="Download missing test suite data",
                     default=False, action="store_true")
+  result.add_option("--download-data-only",
+                    help="Download missing test suite data and exit",
+                    default=False, action="store_true")
   result.add_option("--extra-flags",
                     help="Additional flags to pass to each test command",
                     default="")
+  result.add_option("--ignition", help="Skip tests which don't run in ignition",
+                    default=False, action="store_true")
   result.add_option("--isolates", help="Whether to test isolates",
                     default=False, action="store_true")
   result.add_option("-j", help="The number of parallel tasks to run",
                     default=0, type="int")
   result.add_option("-m", "--mode",
-                    help="The test modes in which to run (comma-separated)",
+                    help="The test modes in which to run (comma-separated,"
+                    " uppercase for ninja and buildbot builds): %s" % MODES.keys(),
                     default="release,debug")
+  result.add_option("--no-harness", "--noharness",
+                    help="Run without test harness of a given suite",
+                    default=False, action="store_true")
   result.add_option("--no-i18n", "--noi18n",
                     help="Skip internationalization tests",
                     default=False, action="store_true")
@@ -196,7 +278,10 @@
                     help="Don't run any testing variants",
                     default=False, dest="no_variants", action="store_true")
   result.add_option("--variants",
-                    help="Comma-separated list of testing variants")
+                    help="Comma-separated list of testing variants: %s" % VARIANTS)
+  result.add_option("--exhaustive-variants",
+                    default=False, action="store_true",
+                    help="Use exhaustive set of default variants.")
   result.add_option("--outdir", help="Base directory with compile output",
                     default="out")
   result.add_option("--predictable",
@@ -235,6 +320,9 @@
   result.add_option("--stress-only",
                     help="Only run tests with --always-opt --stress-opt",
                     default=False, action="store_true")
+  result.add_option("--swarming",
+                    help="Indicates running test driver on swarming.",
+                    default=False, action="store_true")
   result.add_option("--time", help="Print timing information after running",
                     default=False, action="store_true")
   result.add_option("-t", "--timeout", help="Timeout in seconds",
@@ -252,16 +340,71 @@
   result.add_option("--junittestsuite",
                     help="The testsuite name in the JUnit output file",
                     default="v8tests")
-  result.add_option("--random-seed", default=0, dest="random_seed",
+  result.add_option("--random-seed", default=0, dest="random_seed", type="int",
                     help="Default seed for initializing random generator")
+  result.add_option("--random-seed-stress-count", default=1, type="int",
+                    dest="random_seed_stress_count",
+                    help="Number of runs with different random seeds")
   result.add_option("--msan",
                     help="Regard test expectations for MSAN",
                     default=False, action="store_true")
   return result
 
 
+def RandomSeed():
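+  # A seed of 0 means "no seed given" elsewhere in this script, so keep
+  # drawing until the value is non-zero.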
+  seed = 0
+  while not seed:
+    seed = random.SystemRandom().randint(-2147483648, 2147483647)
+  return seed
+
+
+def BuildbotToV8Mode(config):
+  """Convert buildbot build configs to configs understood by the v8 runner.
+
+  V8 configs are always lower case and do not carry the additional _x64
+  suffix used for 64-bit builds on Windows with ninja.
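+
+  For example, 'Release_x64' and 'Release' both map to 'release'.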
+  """
+  mode = config[:-4] if config.endswith('_x64') else config
+  return mode.lower()
+
+def SetupEnvironment(options):
+  """Setup additional environment variables."""
+  symbolizer = 'external_symbolizer_path=%s' % (
+      os.path.join(
+          BASE_DIR, 'third_party', 'llvm-build', 'Release+Asserts', 'bin',
+          'llvm-symbolizer',
+      )
+  )
+
+  if options.asan:
+    os.environ['ASAN_OPTIONS'] = symbolizer
+
+  if options.cfi_vptr:
+    os.environ['UBSAN_OPTIONS'] = ":".join([
+      'print_stacktrace=1',
+      'print_summary=1',
+      'symbolize=1',
+      symbolizer,
+    ])
+
+  if options.msan:
+    os.environ['MSAN_OPTIONS'] = symbolizer
+
+  if options.tsan:
+    suppressions_file = os.path.join(
+        BASE_DIR, 'tools', 'sanitizers', 'tsan_suppressions.txt')
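+    # exit_code=0 lets the test driver, not TSan itself, decide the exit code.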
+    os.environ['TSAN_OPTIONS'] = " ".join([
+      symbolizer,
+      'suppressions=%s' % suppressions_file,
+      'exit_code=0',
+      'report_thread_leaks=0',
+      'history_size=7',
+      'report_destroy_locked=0',
+    ])
+
 def ProcessOptions(options):
-  global VARIANT_FLAGS
+  global ALL_VARIANTS
+  global EXHAUSTIVE_VARIANTS
   global VARIANTS
 
   # Architecture and mode related stuff.
@@ -272,7 +415,7 @@
     options.mode = ",".join([tokens[1] for tokens in options.arch_and_mode])
   options.mode = options.mode.split(",")
   for mode in options.mode:
-    if not mode.lower() in ["debug", "release", "optdebug"]:
+    if BuildbotToV8Mode(mode) not in MODES:
       print "Unknown mode %s" % mode
       return False
   if options.arch in ["auto", "native"]:
@@ -294,6 +437,8 @@
     # Buildbots run presubmit tests as a separate step.
     options.no_presubmit = True
     options.no_network = True
+  if options.download_data_only:
+    options.no_presubmit = True
   if options.command_prefix:
     print("Specifying --command-prefix disables network distribution, "
           "running tests locally.")
@@ -306,20 +451,27 @@
 
   if options.asan:
     options.extra_flags.append("--invoke-weak-callbacks")
+    options.extra_flags.append("--omit-quit")
+
+  if options.novfp3:
+    options.extra_flags.append("--noenable-vfp3")
+
+  if options.exhaustive_variants:
+    # This is used on many bots. It includes a larger set of default variants.
+    # Other options for manipulating variants still apply afterwards.
+    VARIANTS = EXHAUSTIVE_VARIANTS
+
+  if options.msan:
+    VARIANTS = ["default"]
 
   if options.tsan:
     VARIANTS = ["default"]
-    suppressions_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),
-                                     'sanitizers', 'tsan_suppressions.txt')
-    tsan_options = '%s suppressions=%s' % (
-        os.environ.get('TSAN_OPTIONS', ''), suppressions_file)
-    os.environ['TSAN_OPTIONS'] = tsan_options
 
   if options.j == 0:
     options.j = multiprocessing.cpu_count()
 
-  while options.random_seed == 0:
-    options.random_seed = random.SystemRandom().randint(-2147483648, 2147483647)
+  if options.random_seed_stress_count <= 1 and options.random_seed == 0:
+    options.random_seed = RandomSeed()
 
   def excl(*args):
     """Returns true if zero or one of multiple arguments are true."""
@@ -343,8 +495,8 @@
     VARIANTS = ["stress"]
   if options.variants:
     VARIANTS = options.variants.split(",")
-    if not set(VARIANTS).issubset(VARIANT_FLAGS.keys()):
-      print "All variants must be in %s" % str(VARIANT_FLAGS.keys())
+    if not set(VARIANTS).issubset(ALL_VARIANTS):
+      print "All variants must be in %s" % str(ALL_VARIANTS)
       return False
   if options.predictable:
     VARIANTS = ["default"]
@@ -372,12 +524,33 @@
     return False
   if not CheckTestMode("pass|fail test", options.pass_fail_tests):
     return False
-  if not options.no_i18n:
-    DEFAULT_TESTS.append("intl")
+  if options.no_i18n:
+    TEST_MAP["bot_default"].remove("intl")
+    TEST_MAP["default"].remove("intl")
   return True
 
 
-def ShardTests(tests, shard_count, shard_run):
+def ShardTests(tests, options):
+  # Read gtest shard configuration from the environment (e.g. set by
+  # swarming).
+  # If none is present, use values passed on the command line.
+  shard_count = int(os.environ.get('GTEST_TOTAL_SHARDS', options.shard_count))
+  shard_run = os.environ.get('GTEST_SHARD_INDEX')
+  if shard_run is not None:
+    # The v8 shard_run starts at 1, while GTEST_SHARD_INDEX starts at 0.
+    shard_run = int(shard_run) + 1
+  else:
+    shard_run = options.shard_run
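+  # E.g. GTEST_TOTAL_SHARDS=4 with GTEST_SHARD_INDEX=2 runs shard 3 of 4 here.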
+
+  if options.shard_count > 1:
+    # Log if a value was passed on the cmd line and it differs from the
+    # environment variables.
+    if options.shard_count != shard_count:
+      print("shard_count from cmd line differs from environment variable "
+            "GTEST_TOTAL_SHARDS")
+    if options.shard_run > 1 and options.shard_run != shard_run:
+      print("shard_run from cmd line differs from environment variable "
+            "GTEST_SHARD_INDEX")
+
   if shard_count < 2:
     return tests
   if shard_run < 1 or shard_run > shard_count:
@@ -394,78 +567,89 @@
 
 
 def Main():
+  # Use the v8 root as cwd, since some test cases use "load" with relative
+  # paths.
+  os.chdir(BASE_DIR)
+
   parser = BuildOptions()
   (options, args) = parser.parse_args()
   if not ProcessOptions(options):
     parser.print_help()
     return 1
+  SetupEnvironment(options)
 
   exit_code = 0
-  workspace = os.path.abspath(join(os.path.dirname(sys.argv[0]), ".."))
   if not options.no_presubmit:
     print ">>> running presubmit tests"
     exit_code = subprocess.call(
-        [sys.executable, join(workspace, "tools", "presubmit.py")])
+        [sys.executable, join(BASE_DIR, "tools", "presubmit.py")])
 
-  suite_paths = utils.GetSuitePaths(join(workspace, "test"))
+  suite_paths = utils.GetSuitePaths(join(BASE_DIR, "test"))
+
+  # Use default tests if no test configuration was provided at the cmd line.
+  if len(args) == 0:
+    args = ["default"]
 
   # Expand arguments with grouped tests. The args should reflect the list of
 # suites, as otherwise filters would break.
   def ExpandTestGroups(name):
     if name in TEST_MAP:
-      return [suite for suite in TEST_MAP[arg]]
+      return [suite for suite in TEST_MAP[name]]
     else:
       return [name]
   args = reduce(lambda x, y: x + y,
          [ExpandTestGroups(arg) for arg in args],
          [])
 
-  if len(args) == 0:
-    suite_paths = [ s for s in DEFAULT_TESTS if s in suite_paths ]
-  else:
-    args_suites = OrderedDict() # Used as set
-    for arg in args:
-      args_suites[arg.split(os.path.sep)[0]] = True
-    suite_paths = [ s for s in args_suites if s in suite_paths ]
+  args_suites = OrderedDict() # Used as set
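+  # Only the part before the first '/' names a suite, so a test filter such
+  # as 'mjsunit/foo' still selects the 'mjsunit' suite.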
+  for arg in args:
+    args_suites[arg.split('/')[0]] = True
+  suite_paths = [ s for s in args_suites if s in suite_paths ]
 
   suites = []
   for root in suite_paths:
     suite = testsuite.TestSuite.LoadTestSuite(
-        os.path.join(workspace, "test", root))
+        os.path.join(BASE_DIR, "test", root))
     if suite:
+      suite.SetupWorkingDirectory()
       suites.append(suite)
 
-  if options.download_data:
+  if options.download_data or options.download_data_only:
     for s in suites:
       s.DownloadData()
 
+  if options.download_data_only:
+    return exit_code
+
   for (arch, mode) in options.arch_and_mode:
     try:
-      code = Execute(arch, mode, args, options, suites, workspace)
+      code = Execute(arch, mode, args, options, suites)
     except KeyboardInterrupt:
       return 2
     exit_code = exit_code or code
   return exit_code
 
 
-def Execute(arch, mode, args, options, suites, workspace):
+def Execute(arch, mode, args, options, suites):
   print(">>> Running tests for %s.%s" % (arch, mode))
 
   shell_dir = options.shell_dir
   if not shell_dir:
     if options.buildbot:
-      shell_dir = os.path.join(workspace, options.outdir, mode)
-      mode = mode.lower()
+      # TODO(machenbach): Get rid of the different output folder location on
+      # buildbot. Currently it is the capitalized Release and Debug.
+      shell_dir = os.path.join(BASE_DIR, options.outdir, mode)
+      mode = BuildbotToV8Mode(mode)
     else:
-      shell_dir = os.path.join(workspace, options.outdir,
-                               "%s.%s" % (arch, mode))
-  shell_dir = os.path.relpath(shell_dir)
-
-  if mode == "optdebug":
-    mode = "debug"  # "optdebug" is just an alias.
+      shell_dir = os.path.join(
+          BASE_DIR,
+          options.outdir,
+          "%s.%s" % (arch, MODES[mode]["output_folder"]),
+      )
+  if not os.path.exists(shell_dir):
+    raise Exception('Could not find shell_dir: "%s"' % shell_dir)
 
   # Populate context object.
-  mode_flags = MODE_FLAGS[mode]
+  mode_flags = MODES[mode]["flags"]
   timeout = options.timeout
   if timeout == -1:
     # Simulators are slow, therefore allow a longer default timeout.
@@ -474,14 +658,20 @@
     else:
      timeout = TIMEOUT_DEFAULT
 
-  timeout *= TIMEOUT_SCALEFACTOR[mode]
+  timeout *= MODES[mode]["timeout_scalefactor"]
 
   if options.predictable:
     # Predictable mode is slower.
     timeout *= 2
 
-  ctx = context.Context(arch, mode, shell_dir,
-                        mode_flags, options.verbose,
+  # TODO(machenbach): Remove temporary verbose output on Windows after
+  # debugging the driver hang-up on XP.
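+  # Note: "and" binds tighter than "or" below, i.e. this is equivalent to
+  # options.verbose or (utils.IsWindows() and options.progress == "verbose").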
+  verbose_output = (
+      options.verbose or
+      utils.IsWindows() and options.progress == "verbose"
+  )
+  ctx = context.Context(arch, MODES[mode]["execution_mode"], shell_dir,
+                        mode_flags, verbose_output,
                         timeout, options.isolates,
                         options.command_prefix,
                         options.extra_flags,
@@ -490,11 +680,14 @@
                         options.no_sorting,
                         options.rerun_failures_count,
                         options.rerun_failures_max,
-                        options.predictable)
+                        options.predictable,
+                        options.no_harness,
+                        use_perf_data=not options.swarming)
 
   # TODO(all): Combine "simulator" and "simulator_run".
   simulator_run = not options.dont_skip_simulator_slow_tests and \
-      arch in ['arm64', 'arm', 'mipsel', 'mips', 'mips64el'] and \
+      arch in ['arm64', 'arm', 'mipsel', 'mips', 'mips64', 'mips64el', \
+               'ppc', 'ppc64'] and \
       ARCH_GUESS and arch != ARCH_GUESS
   # Find available test suites and read test cases from them.
   variables = {
@@ -502,8 +695,10 @@
     "asan": options.asan,
     "deopt_fuzzer": False,
     "gc_stress": options.gc_stress,
+    "gcov_coverage": options.gcov_coverage,
+    "ignition": options.ignition,
     "isolates": options.isolates,
-    "mode": mode,
+    "mode": MODES[mode]["status_mode"],
     "no_i18n": options.no_i18n,
     "no_snap": options.no_snap,
     "simulator_run": simulator_run,
@@ -512,10 +707,12 @@
     "tsan": options.tsan,
     "msan": options.msan,
     "dcheck_always_on": options.dcheck_always_on,
+    "novfp3": options.novfp3,
+    "predictable": options.predictable,
+    "byteorder": sys.byteorder,
   }
   all_tests = []
   num_tests = 0
-  test_id = 0
   for s in suites:
     s.ReadStatusFile(variables)
     s.ReadTestCases(ctx)
@@ -527,15 +724,32 @@
     if options.cat:
       verbose.PrintTestSource(s.tests)
       continue
-    variant_flags = [VARIANT_FLAGS[var] for var in VARIANTS]
-    s.tests = [ t.CopyAddingFlags(v)
-                for t in s.tests
-                for v in s.VariantFlags(t, variant_flags) ]
-    s.tests = ShardTests(s.tests, options.shard_count, options.shard_run)
+    variant_gen = s.CreateVariantGenerator(VARIANTS)
+    variant_tests = [ t.CopyAddingFlags(v, flags)
+                      for t in s.tests
+                      for v in variant_gen.FilterVariantsByTest(t)
+                      for flags in variant_gen.GetFlagSets(t, v) ]
+
+    if options.random_seed_stress_count > 1:
+      # Duplicate each test for random seed stress mode.
+      def iter_seed_flags():
+        for i in range(0, options.random_seed_stress_count):
+          # Use given random seed for all runs (set by default in execution.py)
+          # or a new random seed if none is specified.
+          if options.random_seed:
+            yield []
+          else:
+            yield ["--random-seed=%d" % RandomSeed()]
+      s.tests = [
+        t.CopyAddingFlags(t.variant, flags)
+        for t in variant_tests
+        for flags in iter_seed_flags()
+      ]
+    else:
+      s.tests = variant_tests
+
+    s.tests = ShardTests(s.tests, options)
     num_tests += len(s.tests)
-    for t in s.tests:
-      t.id = test_id
-      test_id += 1
 
   if options.cat:
     return 0  # We're done here.
@@ -543,23 +757,22 @@
   if options.report:
     verbose.PrintReport(all_tests)
 
-  if num_tests == 0:
-    print "No tests to run."
-    return 0
-
   # Run the tests, either locally or distributed on the network.
   start_time = time.time()
-  progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
+  progress_indicator = progress.IndicatorNotifier()
+  progress_indicator.Register(progress.PROGRESS_INDICATORS[options.progress]())
   if options.junitout:
-    progress_indicator = progress.JUnitTestProgressIndicator(
-        progress_indicator, options.junitout, options.junittestsuite)
+    progress_indicator.Register(progress.JUnitTestProgressIndicator(
+        options.junitout, options.junittestsuite))
   if options.json_test_results:
-    progress_indicator = progress.JsonTestProgressIndicator(
-        progress_indicator, options.json_test_results, arch, mode)
+    progress_indicator.Register(progress.JsonTestProgressIndicator(
+        options.json_test_results, arch, MODES[mode]["execution_mode"],
+        ctx.random_seed))
 
   run_networked = not options.no_network
   if not run_networked:
-    print("Network distribution disabled, running tests locally.")
+    if verbose_output:
+      print("Network distribution disabled, running tests locally.")
   elif utils.GuessOS() != "linux":
     print("Network distribution is only supported on Linux, sorry!")
     run_networked = False
@@ -578,7 +791,7 @@
 
   if run_networked:
     runner = network_execution.NetworkedRunner(suites, progress_indicator,
-                                               ctx, peers, workspace)
+                                               ctx, peers, BASE_DIR)
   else:
     runner = execution.Runner(suites, progress_indicator, ctx)
 
@@ -587,6 +800,15 @@
 
   if options.time:
     verbose.PrintTestDurations(suites, overall_duration)
+
+  if num_tests == 0:
+    print("Warning: no tests were run!")
+
+  if exit_code == 1 and options.json_test_results:
+    print("Force exit code 0 after failures. Json test results file generated "
+          "with failure information.")
+    exit_code = 0
+
   return exit_code