Upgrade V8 to 5.1.281.57  DO NOT MERGE

FPIIM-449

Change-Id: Id981b686b4d587ac31697662eb98bb34be42ad90
(cherry picked from commit 3b9bc31999c9787eb726ecdbfd5796bfdec32a18)
diff --git a/tools/eval_gc_nvp.py b/tools/eval_gc_nvp.py
index f18a579..fcb6d8b 100755
--- a/tools/eval_gc_nvp.py
+++ b/tools/eval_gc_nvp.py
@@ -74,10 +74,11 @@
 
 
 class Category:
-  def __init__(self, key, histogram):
+  def __init__(self, key, histogram, csv):
     self.key = key
     self.values = []
     self.histogram = histogram
+    self.csv = csv
 
   def process_entry(self, entry):
     if self.key in entry:
@@ -92,18 +93,32 @@
     return max(self.values)
 
   def avg(self):
+    if len(self.values) == 0:
+      return 0.0
     return sum(self.values) / len(self.values)
 
+  def empty(self):
+    return len(self.values) == 0
+
   def __str__(self):
-    ret = [self.key]
-    ret.append("  len: {0}".format(len(self.values)))
-    if len(self.values) > 0:
-      ret.append("  min: {0}".format(min(self.values)))
-      ret.append("  max: {0}".format(max(self.values)))
-      ret.append("  avg: {0}".format(sum(self.values) / len(self.values)))
-      if self.histogram:
-        ret.append(str(self.histogram))
-    return "\n".join(ret)
+    if self.csv:
+      ret = [self.key]
+      ret.append(len(self.values))
+      ret.append(self.min())
+      ret.append(self.max())
+      ret.append(self.avg())
+      ret = [str(x) for x in ret]
+      return ",".join(ret)
+    else:
+      ret = [self.key]
+      ret.append("  len: {0}".format(len(self.values)))
+      if len(self.values) > 0:
+        ret.append("  min: {0}".format(self.min()))
+        ret.append("  max: {0}".format(self.max()))
+        ret.append("  avg: {0}".format(self.avg()))
+        if self.histogram:
+          ret.append(str(self.histogram))
+      return "\n".join(ret)
 
   def __repr__(self):
     return "<Category: {0}>".format(self.key)
@@ -143,6 +158,8 @@
                       type=str, nargs='?',
                       default="no",
                       help="rank keys by metric (default: no)")
+  parser.add_argument('--csv', dest='csv',
+                      action='store_true', help='provide output as csv')
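+  # With --csv, each category prints as a single line of the form
+  # <key>,<len>,<min>,<max>,<avg>, e.g. (hypothetical values):
+  #   scavenge,120,0.1,12.5,3.2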
   args = parser.parse_args()
 
   histogram = None
@@ -154,7 +171,7 @@
       bucket_trait = LinearBucket(args.linear_histogram_granularity)
     histogram = Histogram(bucket_trait, not args.histogram_omit_empty)
 
-  categories = [ Category(key, deepcopy(histogram))
+  categories = [ Category(key, deepcopy(histogram), args.csv)
                  for key in args.keys ]
 
   while True:
@@ -165,6 +182,9 @@
     for category in categories:
       category.process_entry(obj)
 
+  # Filter out empty categories.
+  categories = [x for x in categories if not x.empty()]
+
   if args.rank != "no":
     categories = sorted(categories, key=make_key_func(args.rank), reverse=True)
 
diff --git a/tools/eval_gc_time.sh b/tools/eval_gc_time.sh
index 92246d3..ceb4db5 100755
--- a/tools/eval_gc_time.sh
+++ b/tools/eval_gc_time.sh
@@ -7,38 +7,73 @@
 # Convenience Script used to rank GC NVP output.
 
 print_usage_and_die() {
-  echo "Usage: $0 new-gen-rank|old-gen-rank max|avg logfile"
+  echo "Usage: $0 [OPTIONS]"
+  echo ""
+  echo "OPTIONS"
+  echo  "  -r|--rank new-gen-rank|old-gen-rank    GC mode to profile"
+  echo  "                                         (default: old-gen-rank)"
+  echo  "  -s|--sort avg|max                      sorting mode (default: max)"
+  echo  "  -t|--top-level                         include top-level categories"
+  echo  "  -c|--csv                               provide csv output"
+  echo  "  -f|--file FILE                         profile input in a file"
+  echo  "                                         (default: stdin)"
   exit 1
 }
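+
+# Example invocation (log file name is illustrative):
+#   tools/eval_gc_time.sh -r new-gen-rank -s avg -c -f gc.log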
 
-if [ $# -ne 3 ]; then
+OP=old-gen-rank
+RANK_MODE=max
+TOP_LEVEL=no
+CSV=""
+LOGFILE=/dev/stdin
+
+while [[ $# -ge 1 ]]
+do
+  key="$1"
+  case $key in
+    -r|--rank)
+      case $2 in
+        new-gen-rank|old-gen-rank)
+          OP="$2"
+          ;;
+        *)
+          print_usage_and_die
+      esac
+      shift
+      ;;
+    -s|--sort)
+      case $2 in
+        max|avg)
+          RANK_MODE=$2
+          ;;
+        *)
+          print_usage_and_die
+      esac
+      shift
+      ;;
+    -t|--top-level)
+      TOP_LEVEL=yes
+      ;;
+    -c|--csv)
+      CSV=" --csv "
+      ;;
+    -f|--file)
+      LOGFILE=$2
+      shift
+      ;;
+    *)
+      break
+      ;;
+  esac
+  shift
+done
+
+if [[ $# -ne 0 ]]; then
+  echo "Unknown option(s): $@"
+  echo ""
   print_usage_and_die
 fi
 
-case $1 in
-  new-gen-rank|old-gen-rank)
-    OP=$1
-    ;;
-  *)
-    print_usage_and_die
-esac
-
-case $2 in 
-  max|avg)
-    RANK_MODE=$2
-    ;;
-  *)
-    print_usage_and_die
-esac
-
-LOGFILE=$3
-
-GENERAL_INTERESTING_KEYS="\
-  pause \
-"
-
 INTERESTING_NEW_GEN_KEYS="\
-  ${GENERAL_INTERESTING_KEYS} \
   scavenge \
   weak \
   roots \
@@ -49,9 +84,6 @@
 "
 
 INTERESTING_OLD_GEN_KEYS="\
-  ${GENERAL_INTERESTING_KEYS} \
-  external \
-  clear \
   clear.code_flush \
   clear.dependent_code \
   clear.global_handles \
@@ -62,28 +94,48 @@
   clear.weak_cells \
   clear.weak_collections \
   clear.weak_lists \
-  finish \
-  evacuate \
   evacuate.candidates \
   evacuate.clean_up \
-  evacuate.new_space \
+  evacuate.copy \
   evacuate.update_pointers \
   evacuate.update_pointers.between_evacuated \
   evacuate.update_pointers.to_evacuated \
   evacuate.update_pointers.to_new \
   evacuate.update_pointers.weak \
-  mark \
+  external.mc_prologue \
+  external.mc_epilogue \
+  external.mc_incremental_prologue \
+  external.mc_incremental_epilogue \
+  external.weak_global_handles \
   mark.finish_incremental \
   mark.prepare_code_flush \
   mark.roots \
   mark.weak_closure \
-  sweep \
+  mark.weak_closure.ephemeral \
+  mark.weak_closure.weak_handles \
+  mark.weak_closure.weak_roots \
+  mark.weak_closure.harmony \
   sweep.code \
   sweep.map \
   sweep.old \
-  incremental_finalize \
 "
 
+if [[ "$TOP_LEVEL" = "yes" ]]; then
+  INTERESTING_OLD_GEN_KEYS="\
+    ${INTERESTING_OLD_GEN_KEYS} \
+    clear \
+    evacuate \
+    finish \
+    incremental_finalize \
+    mark \
+    pause \
+    sweep \
+  "
+  INTERESTING_NEW_GEN_KEYS="\
+    ${INTERESTING_NEW_GEN_KEYS} \
+  "
+fi
+
 BASE_DIR=$(dirname $0)
 
 case $OP in
@@ -92,16 +144,17 @@
       | $BASE_DIR/eval_gc_nvp.py \
       --no-histogram \
       --rank $RANK_MODE \
+      $CSV \
       ${INTERESTING_NEW_GEN_KEYS}
     ;;
   old-gen-rank)
-    cat $LOGFILE | grep "gc=ms" | grep "reduce_memory=0" | grep -v "steps=0" \
+    cat $LOGFILE | grep "gc=ms" \
       | $BASE_DIR/eval_gc_nvp.py \
       --no-histogram \
       --rank $RANK_MODE \
+      $CSV \
       ${INTERESTING_OLD_GEN_KEYS}
     ;;
   *)
     ;;
 esac
-
diff --git a/tools/external-reference-check.py b/tools/external-reference-check.py
index 287eca4..be01dec 100644
--- a/tools/external-reference-check.py
+++ b/tools/external-reference-check.py
@@ -8,7 +8,7 @@
 import sys
 
 DECLARE_FILE = "src/assembler.h"
-REGISTER_FILE = "src/snapshot/serialize.cc"
+REGISTER_FILE = "src/external-reference-table.cc"
 DECLARE_RE = re.compile("\s*static ExternalReference ([^(]+)\(")
 REGISTER_RE = re.compile("\s*Add\(ExternalReference::([^(]+)\(")
 
diff --git a/tools/gcmole/gcmole.lua b/tools/gcmole/gcmole.lua
index 9739684..82ea4e0 100644
--- a/tools/gcmole/gcmole.lua
+++ b/tools/gcmole/gcmole.lua
@@ -111,6 +111,7 @@
       .. " -DENABLE_DEBUGGER_SUPPORT"
       .. " -DV8_I18N_SUPPORT"
       .. " -I./"
+      .. " -Iinclude/"
       .. " -Ithird_party/icu/source/common"
       .. " -Ithird_party/icu/source/i18n"
       .. " " .. arch_options
diff --git a/tools/gen-postmortem-metadata.py b/tools/gen-postmortem-metadata.py
index d808a2f..a0afc06 100644
--- a/tools/gen-postmortem-metadata.py
+++ b/tools/gen-postmortem-metadata.py
@@ -92,8 +92,6 @@
         'value': 'DescriptorArray::kFirstIndex' },
     { 'name': 'prop_type_field',
         'value': 'DATA' },
-    { 'name': 'prop_type_const_field',
-        'value': 'DATA_CONSTANT' },
     { 'name': 'prop_type_mask',
         'value': 'PropertyDetails::TypeField::kMask' },
     { 'name': 'prop_index_mask',
@@ -156,8 +154,6 @@
         'value': 'StandardFrameConstants::kContextOffset' },
     { 'name': 'off_fp_constant_pool',
         'value': 'StandardFrameConstants::kConstantPoolOffset' },
-    { 'name': 'off_fp_marker',
-        'value': 'StandardFrameConstants::kMarkerOffset' },
     { 'name': 'off_fp_function',
         'value': 'JavaScriptFrameConstants::kFunctionOffset' },
     { 'name': 'off_fp_args',
diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp
index 66f579d..b09fd1f 100644
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
@@ -32,6 +32,7 @@
     'v8_random_seed%': 314159265,
     'v8_vector_stores%': 0,
     'embed_script%': "",
+    'warmup_script%': "",
     'v8_extra_library_files%': [],
     'v8_experimental_extra_library_files%': [],
     'mksnapshot_exec': '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)mksnapshot<(EXECUTABLE_SUFFIX)',
@@ -204,15 +205,13 @@
           'inputs': [
             '<(mksnapshot_exec)',
             '<(embed_script)',
+            '<(warmup_script)',
           ],
           'outputs': [
             '<(INTERMEDIATE_DIR)/snapshot.cc',
           ],
           'variables': {
-            'mksnapshot_flags': [
-              '--log-snapshot-positions',
-              '--logfile', '<(INTERMEDIATE_DIR)/snapshot.log',
-            ],
+            'mksnapshot_flags': [],
             'conditions': [
               ['v8_random_seed!=0', {
                 'mksnapshot_flags': ['--random-seed', '<(v8_random_seed)'],
@@ -227,6 +226,7 @@
             '<@(mksnapshot_flags)',
             '--startup_src', '<@(INTERMEDIATE_DIR)/snapshot.cc',
             '<(embed_script)',
+            '<(warmup_script)',
           ],
         },
       ],
@@ -308,8 +308,6 @@
                     # variable.
                     'mksnapshot_flags_ignition': [
                       '--ignition',
-                      '--log-snapshot-positions',
-                      '--logfile', '<(INTERMEDIATE_DIR)/snapshot_ignition.log',
                     ],
                     'conditions': [
                       ['v8_random_seed!=0', {
@@ -330,6 +328,7 @@
                             '<@(mksnapshot_flags_ignition)',
                             '--startup_blob', '<(PRODUCT_DIR)/snapshot_blob_ignition_host.bin',
                             '<(embed_script)',
+                            '<(warmup_script)',
                           ],
                         }, {
                           'outputs': ['<(PRODUCT_DIR)/snapshot_blob_ignition.bin'],
@@ -338,6 +337,7 @@
                             '<@(mksnapshot_flags_ignition)',
                             '--startup_blob', '<(PRODUCT_DIR)/snapshot_blob_ignition.bin',
                             '<(embed_script)',
+                            '<(warmup_script)',
                           ],
                         }],
                       ],
@@ -348,6 +348,7 @@
                         '<@(mksnapshot_flags_ignition)',
                         '--startup_blob', '<(PRODUCT_DIR)/snapshot_blob_ignition.bin',
                         '<(embed_script)',
+                        '<(warmup_script)',
                       ],
                     }],
                   ],
@@ -370,10 +371,7 @@
               'action_name': 'run_mksnapshot (external)',
               'inputs': ['<(mksnapshot_exec)'],
               'variables': {
-                'mksnapshot_flags': [
-                  '--log-snapshot-positions',
-                  '--logfile', '<(INTERMEDIATE_DIR)/snapshot.log',
-                ],
+                'mksnapshot_flags': [],
                 'conditions': [
                   ['v8_random_seed!=0', {
                     'mksnapshot_flags': ['--random-seed', '<(v8_random_seed)'],
@@ -393,6 +391,7 @@
                         '<@(mksnapshot_flags)',
                         '--startup_blob', '<(PRODUCT_DIR)/snapshot_blob_host.bin',
                         '<(embed_script)',
+                        '<(warmup_script)',
                       ],
                     }, {
                       'outputs': ['<(PRODUCT_DIR)/snapshot_blob.bin'],
@@ -401,6 +400,7 @@
                         '<@(mksnapshot_flags)',
                         '--startup_blob', '<(PRODUCT_DIR)/snapshot_blob.bin',
                         '<(embed_script)',
+                        '<(warmup_script)',
                       ],
                     }],
                   ],
@@ -411,6 +411,7 @@
                     '<@(mksnapshot_flags)',
                     '--startup_blob', '<(PRODUCT_DIR)/snapshot_blob.bin',
                     '<(embed_script)',
+                    '<(warmup_script)',
                   ],
                 }],
               ],
@@ -459,6 +460,8 @@
         '../../src/api-experimental.h',
         '../../src/api.cc',
         '../../src/api.h',
+        '../../src/api-arguments.cc',
+        '../../src/api-arguments.h',
         '../../src/api-natives.cc',
         '../../src/api-natives.h',
         '../../src/arguments.cc',
@@ -521,6 +524,7 @@
         '../../src/code-stubs-hydrogen.cc',
         '../../src/codegen.cc',
         '../../src/codegen.h',
+        '../../src/collector.h',
         '../../src/compilation-cache.cc',
         '../../src/compilation-cache.h',
         '../../src/compilation-dependencies.cc',
@@ -574,8 +578,6 @@
         '../../src/compiler/escape-analysis.h',
         "../../src/compiler/escape-analysis-reducer.cc",
         "../../src/compiler/escape-analysis-reducer.h",
-        '../../src/compiler/fast-accessor-assembler.cc',
-        '../../src/compiler/fast-accessor-assembler.h',
         '../../src/compiler/frame.cc',
         '../../src/compiler/frame.h',
         '../../src/compiler/frame-elider.cc',
@@ -730,11 +732,11 @@
         '../../src/conversions.h',
         '../../src/counters.cc',
         '../../src/counters.h',
+        '../../src/crankshaft/compilation-phase.cc',
+        '../../src/crankshaft/compilation-phase.h',
         '../../src/crankshaft/hydrogen-alias-analysis.h',
         '../../src/crankshaft/hydrogen-bce.cc',
         '../../src/crankshaft/hydrogen-bce.h',
-        '../../src/crankshaft/hydrogen-bch.cc',
-        '../../src/crankshaft/hydrogen-bch.h',
         '../../src/crankshaft/hydrogen-canonicalize.cc',
         '../../src/crankshaft/hydrogen-canonicalize.h',
         '../../src/crankshaft/hydrogen-check-elimination.cc',
@@ -835,8 +837,12 @@
         '../../src/extensions/statistics-extension.h',
         '../../src/extensions/trigger-failure-extension.cc',
         '../../src/extensions/trigger-failure-extension.h',
+        '../../src/external-reference-table.cc',
+        '../../src/external-reference-table.h',
         '../../src/factory.cc',
         '../../src/factory.h',
+        '../../src/fast-accessor-assembler.cc',
+        '../../src/fast-accessor-assembler.h',
         '../../src/fast-dtoa.cc',
         '../../src/fast-dtoa.h',
         '../../src/field-index.h',
@@ -889,6 +895,7 @@
         '../../src/heap/objects-visiting-inl.h',
         '../../src/heap/objects-visiting.cc',
         '../../src/heap/objects-visiting.h',
+        '../../src/heap/page-parallel-job.h',
         '../../src/heap/remembered-set.cc',
         '../../src/heap/remembered-set.h',
         '../../src/heap/scavenge-job.h',
@@ -897,12 +904,9 @@
         '../../src/heap/scavenger.cc',
         '../../src/heap/scavenger.h',
         '../../src/heap/slot-set.h',
-        '../../src/heap/slots-buffer.cc',
-        '../../src/heap/slots-buffer.h',
         '../../src/heap/spaces-inl.h',
         '../../src/heap/spaces.cc',
         '../../src/heap/spaces.h',
-        '../../src/heap/store-buffer-inl.h',
         '../../src/heap/store-buffer.cc',
         '../../src/heap/store-buffer.h',
         '../../src/i18n.cc',
@@ -947,8 +951,8 @@
         '../../src/interpreter/interpreter.h',
         '../../src/interpreter/interpreter-assembler.cc',
         '../../src/interpreter/interpreter-assembler.h',
-        '../../src/interpreter/register-translator.cc',
-        '../../src/interpreter/register-translator.h',
+        '../../src/interpreter/interpreter-intrinsics.cc',
+        '../../src/interpreter/interpreter-intrinsics.h',
         '../../src/interpreter/source-position-table.cc',
         '../../src/interpreter/source-position-table.h',
         '../../src/isolate-inl.h',
@@ -956,8 +960,8 @@
         '../../src/isolate.h',
         '../../src/json-parser.h',
         '../../src/json-stringifier.h',
-        '../../src/key-accumulator.h',
-        '../../src/key-accumulator.cc',
+        '../../src/keys.h',
+        '../../src/keys.cc',
         '../../src/layout-descriptor-inl.h',
         '../../src/layout-descriptor.cc',
         '../../src/layout-descriptor.h',
@@ -1013,6 +1017,8 @@
         '../../src/parsing/token.h',
         '../../src/pending-compilation-error-handler.cc',
         '../../src/pending-compilation-error-handler.h',
+        '../../src/perf-jit.cc',
+        '../../src/perf-jit.h',
         '../../src/profiler/allocation-tracker.cc',
         '../../src/profiler/allocation-tracker.h',
         '../../src/profiler/circular-queue-inl.h',
@@ -1104,14 +1110,24 @@
         '../../src/signature.h',
         '../../src/simulator.h',
         '../../src/small-pointer-list.h',
+        '../../src/snapshot/code-serializer.cc',
+        '../../src/snapshot/code-serializer.h',
+        '../../src/snapshot/deserializer.cc',
+        '../../src/snapshot/deserializer.h',
         '../../src/snapshot/natives.h',
         '../../src/snapshot/natives-common.cc',
-        '../../src/snapshot/serialize.cc',
-        '../../src/snapshot/serialize.h',
+        '../../src/snapshot/partial-serializer.cc',
+        '../../src/snapshot/partial-serializer.h',
+        '../../src/snapshot/serializer.cc',
+        '../../src/snapshot/serializer.h',
+        '../../src/snapshot/serializer-common.cc',
+        '../../src/snapshot/serializer-common.h',
         '../../src/snapshot/snapshot.h',
         '../../src/snapshot/snapshot-common.cc',
         '../../src/snapshot/snapshot-source-sink.cc',
         '../../src/snapshot/snapshot-source-sink.h',
+        '../../src/snapshot/startup-serializer.cc',
+        '../../src/snapshot/startup-serializer.h',
         '../../src/source-position.h',
         '../../src/splay-tree.h',
         '../../src/splay-tree-inl.h',
@@ -1538,7 +1554,6 @@
             '../../src/ppc/frames-ppc.cc',
             '../../src/ppc/frames-ppc.h',
             '../../src/ppc/interface-descriptors-ppc.cc',
-            '../../src/ppc/interface-descriptors-ppc.h',
             '../../src/ppc/macro-assembler-ppc.cc',
             '../../src/ppc/macro-assembler-ppc.h',
             '../../src/ppc/simulator-ppc.cc',
@@ -1547,6 +1562,49 @@
             '../../src/regexp/ppc/regexp-macro-assembler-ppc.h',
           ],
         }],
+        ['v8_target_arch=="s390" or v8_target_arch=="s390x"', {
+          'sources': [  ### gcmole(arch:s390) ###
+            '../../src/compiler/s390/code-generator-s390.cc',
+            '../../src/compiler/s390/instruction-codes-s390.h',
+            '../../src/compiler/s390/instruction-scheduler-s390.cc',
+            '../../src/compiler/s390/instruction-selector-s390.cc',
+            '../../src/crankshaft/s390/lithium-codegen-s390.cc',
+            '../../src/crankshaft/s390/lithium-codegen-s390.h',
+            '../../src/crankshaft/s390/lithium-gap-resolver-s390.cc',
+            '../../src/crankshaft/s390/lithium-gap-resolver-s390.h',
+            '../../src/crankshaft/s390/lithium-s390.cc',
+            '../../src/crankshaft/s390/lithium-s390.h',
+            '../../src/debug/s390/debug-s390.cc',
+            '../../src/full-codegen/s390/full-codegen-s390.cc',
+            '../../src/ic/s390/access-compiler-s390.cc',
+            '../../src/ic/s390/handler-compiler-s390.cc',
+            '../../src/ic/s390/ic-compiler-s390.cc',
+            '../../src/ic/s390/ic-s390.cc',
+            '../../src/ic/s390/stub-cache-s390.cc',
+            '../../src/regexp/s390/regexp-macro-assembler-s390.cc',
+            '../../src/regexp/s390/regexp-macro-assembler-s390.h',
+            '../../src/s390/assembler-s390.cc',
+            '../../src/s390/assembler-s390.h',
+            '../../src/s390/assembler-s390-inl.h',
+            '../../src/s390/builtins-s390.cc',
+            '../../src/s390/codegen-s390.cc',
+            '../../src/s390/codegen-s390.h',
+            '../../src/s390/code-stubs-s390.cc',
+            '../../src/s390/code-stubs-s390.h',
+            '../../src/s390/constants-s390.cc',
+            '../../src/s390/constants-s390.h',
+            '../../src/s390/cpu-s390.cc',
+            '../../src/s390/deoptimizer-s390.cc',
+            '../../src/s390/disasm-s390.cc',
+            '../../src/s390/frames-s390.cc',
+            '../../src/s390/frames-s390.h',
+            '../../src/s390/interface-descriptors-s390.cc',
+            '../../src/s390/macro-assembler-s390.cc',
+            '../../src/s390/macro-assembler-s390.h',
+            '../../src/s390/simulator-s390.cc',
+            '../../src/s390/simulator-s390.h',
+          ],
+        }],
         ['OS=="win"', {
           'variables': {
             'gyp_generators': '<!(echo $GYP_GENERATORS)',
@@ -1607,6 +1665,8 @@
         '../..',
       ],
       'sources': [
+        '../../src/base/accounting-allocator.cc',
+        '../../src/base/accounting-allocator.h',
         '../../src/base/adapters.h',
         '../../src/base/atomicops.h',
         '../../src/base/atomicops_internals_arm64_gcc.h',
@@ -1890,6 +1950,11 @@
           'toolsets': ['target'],
         }],
       ],
+      'direct_dependent_settings': {
+        'include_dirs': [
+          '../../include',
+        ],
+      },
     },
     {
       'target_name': 'natives_blob',
@@ -1999,6 +2064,7 @@
           '../../src/js/string-iterator.js',
           '../../src/js/templates.js',
           '../../src/js/spread.js',
+          '../../src/js/proxy.js',
           '../../src/debug/mirrors.js',
           '../../src/debug/debug.js',
           '../../src/debug/liveedit.js',
@@ -2006,15 +2072,15 @@
         'experimental_library_files': [
           '../../src/js/macros.py',
           '../../src/messages.h',
-          '../../src/js/proxy.js',
           '../../src/js/generator.js',
           '../../src/js/harmony-atomics.js',
-          '../../src/js/harmony-regexp.js',
+          '../../src/js/harmony-regexp-exec.js',
           '../../src/js/harmony-object-observe.js',
           '../../src/js/harmony-sharedarraybuffer.js',
           '../../src/js/harmony-simd.js',
           '../../src/js/harmony-species.js',
           '../../src/js/harmony-unicode-regexps.js',
+          '../../src/js/harmony-string-padding.js',
           '../../src/js/promise-extra.js',
         ],
         'libraries_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries.bin',
diff --git a/tools/ll_prof.py b/tools/ll_prof.py
index e657961..ca2cb00 100755
--- a/tools/ll_prof.py
+++ b/tools/ll_prof.py
@@ -66,20 +66,20 @@
 
 Examples:
   # Print flat profile with annotated disassembly for the 10 top
-  # symbols. Use default log names and include the snapshot log.
-  $ %prog --snapshot --disasm-top=10
+  # symbols. Use default log names.
+  $ %prog --disasm-top=10
 
   # Print flat profile with annotated disassembly for all used symbols.
   # Use default log names and include kernel symbols into analysis.
   $ %prog --disasm-all --kernel
 
   # Print flat profile. Use custom log names.
-  $ %prog --log=foo.log --snapshot-log=snap-foo.log --trace=foo.data --snapshot
+  $ %prog --log=foo.log --trace=foo.data
 """
 
 
 JS_ORIGIN = "js"
-JS_SNAPSHOT_ORIGIN = "js-snapshot"
+
 
 class Code(object):
   """Code object."""
@@ -199,7 +199,7 @@
       self.origin)
 
   def _GetDisasmLines(self, arch, options):
-    if self.origin == JS_ORIGIN or self.origin == JS_SNAPSHOT_ORIGIN:
+    if self.origin == JS_ORIGIN:
       inplace = False
       filename = options.log + ".ll"
     else:
@@ -328,30 +328,6 @@
     self.header_size = header_size
 
 
-class SnapshotLogReader(object):
-  """V8 snapshot log reader."""
-
-  _SNAPSHOT_CODE_NAME_RE = re.compile(
-    r"snapshot-code-name,(\d+),\"(.*)\"")
-
-  def __init__(self, log_name):
-    self.log_name = log_name
-
-  def ReadNameMap(self):
-    log = open(self.log_name, "r")
-    try:
-      snapshot_pos_to_name = {}
-      for line in log:
-        match = SnapshotLogReader._SNAPSHOT_CODE_NAME_RE.match(line)
-        if match:
-          pos = int(match.group(1))
-          name = match.group(2)
-          snapshot_pos_to_name[pos] = name
-    finally:
-      log.close()
-    return snapshot_pos_to_name
-
-
 class LogReader(object):
   """V8 low-level (binary) log reader."""
 
@@ -365,17 +341,13 @@
 
   _CODE_CREATE_TAG = "C"
   _CODE_MOVE_TAG = "M"
-  _CODE_DELETE_TAG = "D"
-  _SNAPSHOT_POSITION_TAG = "P"
   _CODE_MOVING_GC_TAG = "G"
 
-  def __init__(self, log_name, code_map, snapshot_pos_to_name):
+  def __init__(self, log_name, code_map):
     self.log_file = open(log_name, "r")
     self.log = mmap.mmap(self.log_file.fileno(), 0, mmap.MAP_PRIVATE)
     self.log_pos = 0
     self.code_map = code_map
-    self.snapshot_pos_to_name = snapshot_pos_to_name
-    self.address_to_snapshot_name = {}
 
     self.arch = self.log[:self.log.find("\0")]
     self.log_pos += len(self.arch) + 1
@@ -395,17 +367,12 @@
     self.code_delete_struct = LogReader._DefineStruct([
         ("address", pointer_type)])
 
-    self.snapshot_position_struct = LogReader._DefineStruct([
-        ("address", pointer_type),
-        ("position", ctypes.c_int32)])
-
   def ReadUpToGC(self):
     while self.log_pos < self.log.size():
       tag = self.log[self.log_pos]
       self.log_pos += 1
 
       if tag == LogReader._CODE_MOVING_GC_TAG:
-        self.address_to_snapshot_name.clear()
         return
 
       if tag == LogReader._CODE_CREATE_TAG:
@@ -413,12 +380,8 @@
         self.log_pos += ctypes.sizeof(event)
         start_address = event.code_address
         end_address = start_address + event.code_size
-        if start_address in self.address_to_snapshot_name:
-          name = self.address_to_snapshot_name[start_address]
-          origin = JS_SNAPSHOT_ORIGIN
-        else:
-          name = self.log[self.log_pos:self.log_pos + event.name_size]
-          origin = JS_ORIGIN
+        name = self.log[self.log_pos:self.log_pos + event.name_size]
+        origin = JS_ORIGIN
         self.log_pos += event.name_size
         origin_offset = self.log_pos
         self.log_pos += event.code_size
@@ -459,30 +422,6 @@
         self.code_map.Add(code)
         continue
 
-      if tag == LogReader._CODE_DELETE_TAG:
-        event = self.code_delete_struct.from_buffer(self.log, self.log_pos)
-        self.log_pos += ctypes.sizeof(event)
-        old_start_address = event.address
-        code = self.code_map.Find(old_start_address)
-        if not code:
-          print >>sys.stderr, "Warning: Not found %x" % old_start_address
-          continue
-        assert code.start_address == old_start_address, \
-            "Inexact delete address %x for %s" % (old_start_address, code)
-        self.code_map.Remove(code)
-        continue
-
-      if tag == LogReader._SNAPSHOT_POSITION_TAG:
-        event = self.snapshot_position_struct.from_buffer(self.log,
-                                                          self.log_pos)
-        self.log_pos += ctypes.sizeof(event)
-        start_address = event.address
-        snapshot_pos = event.position
-        if snapshot_pos in self.snapshot_pos_to_name:
-          self.address_to_snapshot_name[start_address] = \
-              self.snapshot_pos_to_name[snapshot_pos]
-        continue
-
       assert False, "Unknown tag %s" % tag
 
   def Dispose(self):
@@ -898,16 +837,9 @@
 
 if __name__ == "__main__":
   parser = optparse.OptionParser(USAGE)
-  parser.add_option("--snapshot-log",
-                    default="obj/release/snapshot.log",
-                    help="V8 snapshot log file name [default: %default]")
   parser.add_option("--log",
                     default="v8.log",
                     help="V8 log file name [default: %default]")
-  parser.add_option("--snapshot",
-                    default=False,
-                    action="store_true",
-                    help="process V8 snapshot log [default: %default]")
   parser.add_option("--trace",
                     default="perf.data",
                     help="perf trace file name [default: %default]")
@@ -945,12 +877,7 @@
   options, args = parser.parse_args()
 
   if not options.quiet:
-    if options.snapshot:
-      print "V8 logs: %s, %s, %s.ll" % (options.snapshot_log,
-                                        options.log,
-                                        options.log)
-    else:
-      print "V8 log: %s, %s.ll (no snapshot)" % (options.log, options.log)
+    print "V8 log: %s, %s.ll" % (options.log, options.log)
     print "Perf trace file: %s" % options.trace
 
   V8_GC_FAKE_MMAP = options.gc_fake_mmap
@@ -972,17 +899,10 @@
   mmap_time = 0
   sample_time = 0
 
-  # Process the snapshot log to fill the snapshot name map.
-  snapshot_name_map = {}
-  if options.snapshot:
-    snapshot_log_reader = SnapshotLogReader(log_name=options.snapshot_log)
-    snapshot_name_map = snapshot_log_reader.ReadNameMap()
-
   # Initialize the log reader.
   code_map = CodeMap()
   log_reader = LogReader(log_name=options.log + ".ll",
-                         code_map=code_map,
-                         snapshot_pos_to_name=snapshot_name_map)
+                         code_map=code_map)
   if not options.quiet:
     print "Generated code architecture: %s" % log_reader.arch
     print
diff --git a/tools/parser-shell.cc b/tools/parser-shell.cc
index 5d4b0cc..ad687c9 100644
--- a/tools/parser-shell.cc
+++ b/tools/parser-shell.cc
@@ -102,7 +102,7 @@
   i::ScriptData* cached_data_impl = NULL;
   // First round of parsing (produce data to cache).
   {
-    Zone zone;
+    Zone zone(reinterpret_cast<i::Isolate*>(isolate)->allocator());
     ParseInfo info(&zone, script);
     info.set_global();
     info.set_cached_data(&cached_data_impl);
@@ -120,7 +120,7 @@
   }
   // Second round of parsing (consume cached data).
   {
-    Zone zone;
+    Zone zone(reinterpret_cast<i::Isolate*>(isolate)->allocator());
     ParseInfo info(&zone, script);
     info.set_global();
     info.set_cached_data(&cached_data_impl);
diff --git a/tools/presubmit.py b/tools/presubmit.py
index 23940bb..dd3533b 100755
--- a/tools/presubmit.py
+++ b/tools/presubmit.py
@@ -58,7 +58,6 @@
 
 LINT_RULES = """
 -build/header_guard
-+build/include_alpha
 -build/include_what_you_use
 -build/namespaces
 -readability/check
diff --git a/tools/profviz/composer.js b/tools/profviz/composer.js
index 85729b6..108911d 100644
--- a/tools/profviz/composer.js
+++ b/tools/profviz/composer.js
@@ -104,15 +104,15 @@
         new TimerEvent("recompile sync", "#CC0044",  true, 0),
       'V8.RecompileConcurrent':
         new TimerEvent("recompile async", "#CC4499", false, 1),
-      'V8.CompileEval':
+      'V8.CompileEvalMicroSeconds':
         new TimerEvent("compile eval", "#CC4400",  true, 0),
       'V8.IcMiss':
         new TimerEvent("ic miss", "#CC9900", false, 0),
-      'V8.Parse':
+      'V8.ParseMicroSeconds':
         new TimerEvent("parse", "#00CC00",  true, 0),
-      'V8.PreParse':
+      'V8.PreParseMicroSeconds':
         new TimerEvent("preparse", "#44CC00",  true, 0),
-      'V8.ParseLazy':
+      'V8.ParseLazyMicroSeconds':
         new TimerEvent("lazy parse", "#00CC44",  true, 0),
       'V8.GCScavenger':
         new TimerEvent("gc scavenge", "#0044CC",  true, 0),
@@ -331,7 +331,7 @@
 
     var line;
     while (line = input()) {
-      logreader.processLogLine(line);
+      for (var s of line.split("\n")) logreader.processLogLine(s);
     }
 
     // Collect execution pauses.
diff --git a/tools/profviz/worker.js b/tools/profviz/worker.js
index b17ca29..7f16308 100644
--- a/tools/profviz/worker.js
+++ b/tools/profviz/worker.js
@@ -106,7 +106,6 @@
          var callGraphSize = 5;
          var ignoreUnknown = true;
          var stateFilter = null;
-         var snapshotLogProcessor = null;
          var range = range_start_override + "," + range_end_override;
 
          var tickProcessor = new TickProcessor(entriesProvider,
@@ -114,7 +113,6 @@
                                                callGraphSize,
                                                ignoreUnknown,
                                                stateFilter,
-                                               snapshotLogProcessor,
                                                distortion,
                                                range);
          for (var i = 0; i < content_lines.length; i++) {
diff --git a/tools/release/auto_roll.py b/tools/release/auto_roll.py
index fc9aeee..b71cac5 100755
--- a/tools/release/auto_roll.py
+++ b/tools/release/auto_roll.py
@@ -155,6 +155,7 @@
     if not self._options.dry_run:
       self.GitUpload(author=self._options.author,
                      force=True,
+                     bypass_hooks=True,
                      cq=self._options.use_commit_queue,
                      cwd=cwd)
       print "CL uploaded."
diff --git a/tools/release/check_clusterfuzz.py b/tools/release/check_clusterfuzz.py
index fc826c1..cd73051 100755
--- a/tools/release/check_clusterfuzz.py
+++ b/tools/release/check_clusterfuzz.py
@@ -71,6 +71,15 @@
   },
   {
     "args": {
+      "job_type": "linux_asan_d8_ignition_dbg",
+      "reproducible": "True",
+      "open": "True",
+      "bug_information": "",
+    },
+    "crash_state": ANY_RE,
+  },
+  {
+    "args": {
       "job_type": "linux_asan_d8_v8_arm_dbg",
       "reproducible": "True",
       "open": "True",
diff --git a/tools/release/common_includes.py b/tools/release/common_includes.py
index c3a216c..5c03236 100644
--- a/tools/release/common_includes.py
+++ b/tools/release/common_includes.py
@@ -382,7 +382,7 @@
     # is the case for all automated merge and push commits - also no title is
     # the prefix of another title).
     commit = None
-    for wait_interval in [5, 10, 20, 40, 60, 60]:
+    for wait_interval in [10, 30, 60, 60, 60, 60, 60]:
       self.step.Git("fetch")
       commit = self.step.GitLog(n=1, format="%H", grep=message, branch=remote)
       if commit:
diff --git a/tools/release/test_scripts.py b/tools/release/test_scripts.py
index 4f133ac..05457c9 100644
--- a/tools/release/test_scripts.py
+++ b/tools/release/test_scripts.py
@@ -1121,7 +1121,7 @@
            self.ROLL_COMMIT_MSG),
           "", cwd=chrome_dir),
       Cmd("git cl upload --send-mail --email \"author@chromium.org\" -f "
-          "--use-commit-queue", "", cwd=chrome_dir),
+          "--use-commit-queue --bypass-hooks", "", cwd=chrome_dir),
       Cmd("git checkout -f master", "", cwd=chrome_dir),
       Cmd("git branch -D work-branch", "", cwd=chrome_dir),
     ]
diff --git a/tools/run-deopt-fuzzer.py b/tools/run-deopt-fuzzer.py
index e4d8f16..970aa8e 100755
--- a/tools/run-deopt-fuzzer.py
+++ b/tools/run-deopt-fuzzer.py
@@ -71,6 +71,8 @@
                    "ia32",
                    "ppc",
                    "ppc64",
+                   "s390",
+                   "s390x",
                    "mipsel",
                    "nacl_ia32",
                    "nacl_x64",
@@ -321,7 +323,6 @@
     suite = testsuite.TestSuite.LoadTestSuite(
         os.path.join(BASE_DIR, "test", root))
     if suite:
-      suite.SetupWorkingDirectory()
       suites.append(suite)
 
   if options.download_data:
@@ -387,7 +388,8 @@
                         0,  # No use of a rerun-failing-tests maximum.
                         False,  # No predictable mode.
                         False,  # No no_harness mode.
-                        False)   # Don't use perf data.
+                        False,  # Don't use perf data.
+                        False)  # Coverage not supported.
 
   # Find available test suites and read test cases from them.
   variables = {
diff --git a/tools/run-perf.sh b/tools/run-perf.sh
new file mode 100755
index 0000000..24053b4
--- /dev/null
+++ b/tools/run-perf.sh
@@ -0,0 +1,52 @@
+#! /bin/sh
+#
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+
+########## Global variable definitions
+
+# Ensure that <your CPU clock> / $SAMPLE_EVERY_N_CYCLES < $MAXIMUM_SAMPLE_RATE.
+MAXIMUM_SAMPLE_RATE=10000000
+SAMPLE_EVERY_N_CYCLES=10000
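+# Example: a 3 GHz CPU sampled every 10000 cycles produces
+# 3000000000 / 10000 = 300000 samples per second, well below the cap above.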
+SAMPLE_RATE_CONFIG_FILE="/proc/sys/kernel/perf_event_max_sample_rate"
+KERNEL_MAP_CONFIG_FILE="/proc/sys/kernel/kptr_restrict"
+CALL_GRAPH_METHOD="fp"  # dwarf does not play nice with JITted objects.
+
+########## Usage
+
+usage() {
+cat << EOF
+usage: $0 <benchmark_command>
+
+Executes <benchmark_command> under observation by Linux perf.
+Sampling event is cycles in user space, call graphs are recorded.
+EOF
+}
+
+if [ $# -eq 0 ] || [ "$1" = "-h" ] || [ "$1" = "--help" ]; then
+  usage
+  exit 1
+fi
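+
+# Example (d8 path and benchmark script are illustrative):
+#   tools/run-perf.sh out/Release/d8 benchmark.js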
+
+########## Actual script execution
+
+ACTUAL_SAMPLE_RATE=$(cat $SAMPLE_RATE_CONFIG_FILE)
+if [ "$ACTUAL_SAMPLE_RATE" -lt "$MAXIMUM_SAMPLE_RATE" ] ; then
+  echo "Setting appropriate maximum sample rate..."
+  echo $MAXIMUM_SAMPLE_RATE | sudo tee $SAMPLE_RATE_CONFIG_FILE
+fi
+
+ACTUAL_KERNEL_MAP_RESTRICTION=$(cat $KERNEL_MAP_CONFIG_FILE)
+if [ "$ACTUAL_KERNEL_MAP_RESTRICTION" -ne "0" ] ; then
+  echo "Disabling kernel address map restriction..."
+  echo 0 | sudo tee $KERNEL_MAP_CONFIG_FILE
+fi
+
+echo "Running..."
+perf record -R \
+  -e cycles:u \
+  -c $SAMPLE_EVERY_N_CYCLES \
+  --call-graph $CALL_GRAPH_METHOD \
+  -i "$@" --perf_basic_prof
diff --git a/tools/run-tests.py b/tools/run-tests.py
index c94457f..a380c97 100755
--- a/tools/run-tests.py
+++ b/tools/run-tests.py
@@ -85,6 +85,8 @@
   "ignition": [
     "mjsunit",
     "cctest",
+    "webkit",
+    "message",
   ],
   # This needs to stay in sync with test/optimize_for_size.isolate.
   "optimize_for_size": [
@@ -173,6 +175,8 @@
                    "mips64el",
                    "nacl_ia32",
                    "nacl_x64",
+                   "s390",
+                   "s390x",
                    "ppc",
                    "ppc64",
                    "x64",
@@ -208,6 +212,8 @@
   result.add_option("--asan",
                     help="Regard test expectations for ASAN",
                     default=False, action="store_true")
+  result.add_option("--sancov-dir",
+                    help="Directory where to collect coverage data")
   result.add_option("--cfi-vptr",
                     help="Run tests with UBSAN cfi_vptr option.",
                     default=False, action="store_true")
@@ -222,9 +228,6 @@
                     default=False, action="store_true")
   result.add_option("--cat", help="Print the source of the tests",
                     default=False, action="store_true")
-  result.add_option("--flaky-tests",
-                    help="Regard tests marked as flaky (run|skip|dontcare)",
-                    default="dontcare")
   result.add_option("--slow-tests",
                     help="Regard slow tests (run|skip|dontcare)",
                     default="dontcare")
@@ -298,7 +301,7 @@
                           " (verbose, dots, color, mono)"),
                     choices=progress.PROGRESS_INDICATORS.keys(), default="mono")
   result.add_option("--quickcheck", default=False, action="store_true",
-                    help=("Quick check mode (skip slow/flaky tests)"))
+                    help=("Quick check mode (skip slow tests)"))
   result.add_option("--report", help="Print a summary of the tests to be run",
                     default=False, action="store_true")
   result.add_option("--json-test-results",
@@ -385,6 +388,14 @@
   if options.asan:
     os.environ['ASAN_OPTIONS'] = symbolizer
 
+  if options.sancov_dir:
+    assert os.path.exists(options.sancov_dir)
+    os.environ['ASAN_OPTIONS'] = ":".join([
+      'coverage=1',
+      'coverage_dir=%s' % options.sancov_dir,
+      symbolizer,
+    ])
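+    # The resulting value has the form (directory hypothetical):
+    # coverage=1:coverage_dir=/tmp/sancov:<symbolizer option>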
+
   if options.cfi_vptr:
     os.environ['UBSAN_OPTIONS'] = ":".join([
       'print_stacktrace=1',
@@ -490,7 +501,6 @@
     return False
   if options.quickcheck:
     VARIANTS = ["default", "stress"]
-    options.flaky_tests = "skip"
     options.slow_tests = "skip"
     options.pass_fail_tests = "skip"
   if options.no_stress:
@@ -524,8 +534,6 @@
       print "Unknown %s mode %s" % (name, option)
       return False
     return True
-  if not CheckTestMode("flaky test", options.flaky_tests):
-    return False
   if not CheckTestMode("slow test", options.slow_tests):
     return False
   if not CheckTestMode("pass|fail test", options.pass_fail_tests):
@@ -616,7 +624,6 @@
     suite = testsuite.TestSuite.LoadTestSuite(
         os.path.join(BASE_DIR, "test", root))
     if suite:
-      suite.SetupWorkingDirectory()
       suites.append(suite)
 
   if options.download_data or options.download_data_only:
@@ -688,7 +695,8 @@
                         options.rerun_failures_max,
                         options.predictable,
                         options.no_harness,
-                        use_perf_data=not options.swarming)
+                        use_perf_data=not options.swarming,
+                        sancov_dir=options.sancov_dir)
 
   # TODO(all): Combine "simulator" and "simulator_run".
   simulator_run = not options.dont_skip_simulator_slow_tests and \
@@ -725,8 +733,8 @@
     if len(args) > 0:
       s.FilterTestCasesByArgs(args)
     all_tests += s.tests
-    s.FilterTestCasesByStatus(options.warn_unused, options.flaky_tests,
-                              options.slow_tests, options.pass_fail_tests)
+    s.FilterTestCasesByStatus(options.warn_unused, options.slow_tests,
+                              options.pass_fail_tests)
     if options.cat:
       verbose.PrintTestSource(s.tests)
       continue
@@ -815,6 +823,18 @@
           "with failure information.")
     exit_code = 0
 
+  if options.sancov_dir:
+    # If tests ran with sanitizer coverage, merge coverage files in the end.
+    try:
+      print "Merging sancov files."
+      subprocess.check_call([
+        sys.executable,
+        join(BASE_DIR, "tools", "sanitizers", "sancov_merger.py"),
+        "--coverage-dir=%s" % options.sancov_dir])
+    except:
+      print >> sys.stderr, "Error: Merging sancov files failed."
+      exit_code = 1
+
   return exit_code
 
 
diff --git a/tools/sanitizers/sancov_formatter.py b/tools/sanitizers/sancov_formatter.py
new file mode 100755
index 0000000..4f3ea9e
--- /dev/null
+++ b/tools/sanitizers/sancov_formatter.py
@@ -0,0 +1,446 @@
+#!/usr/bin/env python
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Script to transform and merge sancov files into human readable json-format.
+
+The script supports three actions:
+all: Writes a json file with all instrumented lines of all executables.
+merge: Merges sancov files with coverage output into an existing json file.
+split: Splits the json file into separate files per covered source file.
+
+The json data is structured as follows:
+{
+  "version": 1,
+  "tests": ["executable1", "executable2", ...],
+  "files": {
+    "file1": [[<instr line 1>, <bit_mask>], [<instr line 2>, <bit_mask>], ...],
+    "file2": [...],
+    ...
+  }
+}
+
+The executables are sorted and determine the test bit mask. An executable at
+zero-based index i contributes the bit 1 << i, e.g. executable1 = 1,
+executable3 = 4, etc. Hence, a line covered by executable1 and executable3
+will have bit_mask == 5 == 0b101. The number of tests is restricted to 52 in
+version 1, to allow JavaScript JSON parsing of the bit masks encoded as
+numbers. JS's max safe integer is 2^53 - 1.
+
+The line-number-bit_mask pairs are sorted by line number and don't contain
+duplicates.
+
+Split json data preserves the same format, but only contains one file per
+json file.
+
+The sancov tool is expected to be in the llvm compiler-rt third-party
+directory. It's not checked out by default and must be added as a custom deps:
+'v8/third_party/llvm/projects/compiler-rt':
+    'https://chromium.googlesource.com/external/llvm.org/compiler-rt.git'
+"""
+
+import argparse
+import json
+import logging
+import os
+import re
+import subprocess
+import sys
+
+from multiprocessing import Pool, cpu_count
+
+
+logging.basicConfig(level=logging.INFO)
+
+# Files to exclude from coverage. Dropping their data early adds more speed.
+# The contained cc files are already excluded from instrumentation, but inlined
+# data is referenced through v8's object files.
+EXCLUSIONS = [
+  'buildtools',
+  'src/third_party',
+  'third_party',
+  'test',
+  'testing',
+]
+
+# Executables found in the build output for which no coverage is generated.
+# Exclude them from the coverage data file.
+EXE_BLACKLIST = [
+  'generate-bytecode-expectations',
+  'hello-world',
+  'mksnapshot',
+  'parser-shell',
+  'process',
+  'shell',
+]
+
+# V8 checkout directory.
+BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(
+    os.path.abspath(__file__))))
+
+# Executable location. TODO(machenbach): Only release is supported for now.
+BUILD_DIR = os.path.join(BASE_DIR, 'out', 'Release')
+
+# Path prefix added by the llvm symbolizer including trailing slash.
+OUTPUT_PATH_PREFIX = os.path.join(BUILD_DIR, '..', '..', '')
+
+# The sancov tool location.
+SANCOV_TOOL = os.path.join(
+    BASE_DIR, 'third_party', 'llvm', 'projects', 'compiler-rt',
+    'lib', 'sanitizer_common', 'scripts', 'sancov.py')
+
+# Simple script to sanitize the PCs from objdump.
+SANITIZE_PCS = os.path.join(BASE_DIR, 'tools', 'sanitizers', 'sanitize_pcs.py')
+
+# The llvm symbolizer location.
+SYMBOLIZER = os.path.join(
+    BASE_DIR, 'third_party', 'llvm-build', 'Release+Asserts', 'bin',
+    'llvm-symbolizer')
+
+# Number of cpus.
+CPUS = cpu_count()
+
+# Regexp to find sancov files as output by sancov_merger.py. Also grabs the
+# executable name in group 1.
+SANCOV_FILE_RE = re.compile(r'^(.*)\.result\.sancov$')
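+# E.g. 'd8.result.sancov' matches, capturing 'd8' as the executable name.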
+
+
+def executables():
+  """Iterates over executable files in the build directory."""
+  for f in os.listdir(BUILD_DIR):
+    file_path = os.path.join(BUILD_DIR, f)
+    if (os.path.isfile(file_path) and
+        os.access(file_path, os.X_OK) and
+        f not in EXE_BLACKLIST):
+      yield file_path
+
+
+def process_symbolizer_output(output):
+  """Post-process llvm symbolizer output.
+
+  Excludes files outside the v8 checkout or in the exclusion list above
+  from further processing. Drops the character index in each line.
+
+  Returns: A mapping of file names to lists of line numbers. The file names
+           have relative paths to the v8 base directory. The lists of line
+           numbers don't contain duplicate lines and are sorted.
+  """
+  # Drop path prefix when iterating lines. The path is redundant and takes
+  # too much space. Drop files outside that path, e.g. generated files in
+  # the build dir and absolute paths to c++ library headers.
+  def iter_lines():
+    for line in output.strip().splitlines():
+      if line.startswith(OUTPUT_PATH_PREFIX):
+        yield line[len(OUTPUT_PATH_PREFIX):]
+
+  # Map file names to sets of instrumented line numbers.
+  file_map = {}
+  for line in iter_lines():
+    # Drop character number, we only care for line numbers. Each line has the
+    # form: <file name>:<line number>:<character number>.
+    file_name, number, _ = line.split(':')
+    file_map.setdefault(file_name, set([])).add(int(number))
+
+  # Remove exclusion patterns from file map. It's cheaper to do it after the
+  # mapping, as there are few excluded files and we don't want to do this
+  # check for numerous lines in ordinary files.
+  def keep(file_name):
+    for e in EXCLUSIONS:
+      if file_name.startswith(e):
+        return False
+    return True
+
+  # Return in serializable form and filter.
+  return {k: sorted(file_map[k]) for k in file_map if keep(k)}
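+
+# For example (paths and line numbers are hypothetical), the symbolizer lines
+#   <OUTPUT_PATH_PREFIX>src/foo.cc:87:7
+#   <OUTPUT_PATH_PREFIX>src/foo.cc:87:10
+#   <OUTPUT_PATH_PREFIX>src/baz/bar.h:11:0
+# post-process to {'src/baz/bar.h': [11], 'src/foo.cc': [87]}.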
+
+
+def get_instrumented_lines(executable):
+  """Return the instrumented lines of an executable.
+
+  Called through the multiprocessing pool.
+
+  Returns: Post-processed llvm output as returned by process_symbolizer_output.
+  """
+  # The first two pipes are from llvm's tool sancov.py with 0x added to the hex
+  # numbers. The results are piped into the llvm symbolizer, which outputs for
+  # each PC: <file name with abs path>:<line number>:<character number>.
+  # We don't call the sancov tool to get more speed.
+  process = subprocess.Popen(
+      'objdump -d %s | '
+      'grep \'^\s\+[0-9a-f]\+:.*\scall\(q\|\)\s\+[0-9a-f]\+ '
+      '<__sanitizer_cov\(_with_check\|\)\(@plt\|\)>\' | '
+      'grep \'^\s\+[0-9a-f]\+\' -o | '
+      '%s | '
+      '%s --obj %s -functions=none' %
+          (executable, SANITIZE_PCS, SYMBOLIZER, executable),
+      stdout=subprocess.PIPE,
+      stderr=subprocess.PIPE,
+      stdin=subprocess.PIPE,
+      cwd=BASE_DIR,
+      shell=True,
+  )
+  output, _ = process.communicate()
+  assert process.returncode == 0
+  return process_symbolizer_output(output)
+
+
+def merge_instrumented_line_results(exe_list, results):
+  """Merge multiprocessing results for all instrumented lines.
+
+  Args:
+    exe_list: List of all executable names with absolute paths.
+    results: List of results as returned by get_instrumented_lines.
+
+  Returns: Dict to be used as json data as specified on the top of this page.
+           The dictionary contains all instrumented lines of all files
+           referenced by all executables.
+  """
+  def merge_files(x, y):
+    for file_name, lines in y.iteritems():
+      x.setdefault(file_name, set([])).update(lines)
+    return x
+  result = reduce(merge_files, results, {})
+
+  # Return data as file->lines mapping. The lines are saved as lists
+  # with (line number, test bits (as int)). The test bits are initialized with
+  # 0, meaning instrumented, but no coverage.
+  # The order of the test bits is given with key 'tests'. For now, these are
+  # the executable names. We use a _list_ with two items instead of a tuple to
+  # ease merging by allowing mutation of the second item.
+  return {
+    'version': 1,
+    'tests': sorted(map(os.path.basename, exe_list)),
+    'files': {f: map(lambda l: [l, 0], sorted(result[f])) for f in result},
+  }
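+
+# E.g. (hypothetical data): executables 'd8' and 'unittests' referencing one
+# file would yield {'version': 1, 'tests': ['d8', 'unittests'],
+# 'files': {'src/foo.cc': [[87, 0], [92, 0]]}}.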
+
+
+def write_instrumented(options):
+  """Implements the 'all' action of this tool."""
+  exe_list = list(executables())
+  logging.info('Reading instrumented lines from %d executables.',
+               len(exe_list))
+  pool = Pool(CPUS)
+  try:
+    results = pool.imap_unordered(get_instrumented_lines, exe_list)
+  finally:
+    pool.close()
+
+  # Merge multiprocessing results and prepare output data.
+  data = merge_instrumented_line_results(exe_list, results)
+
+  logging.info('Read data from %d executables, which covers %d files.',
+               len(data['tests']), len(data['files']))
+  logging.info('Writing results to %s', options.json_output)
+
+  # Write json output.
+  with open(options.json_output, 'w') as f:
+    json.dump(data, f, sort_keys=True)
+
+
+def get_covered_lines(args):
+  """Return the covered lines of an executable.
+
+  Called through the multiprocessing pool. The args are expected to unpack to:
+    cov_dir: Folder with sancov files merged by sancov_merger.py.
+    executable: The executable that was called to produce the given coverage
+                data.
+    sancov_file: The merged sancov file with coverage data.
+
+  Returns: A tuple of post-processed llvm output as returned by
+           process_symbolizer_output and the executable name.
+  """
+  cov_dir, executable, sancov_file = args
+
+  # Let the sancov tool print the covered PCs and pipe them through the llvm
+  # symbolizer.
+  process = subprocess.Popen(
+      '%s print %s 2> /dev/null | '
+      '%s --obj %s -functions=none' %
+          (SANCOV_TOOL,
+           os.path.join(cov_dir, sancov_file),
+           SYMBOLIZER,
+           os.path.join(BUILD_DIR, executable)),
+      stdout=subprocess.PIPE,
+      stderr=subprocess.PIPE,
+      stdin=subprocess.PIPE,
+      cwd=BASE_DIR,
+      shell=True,
+  )
+  output, _ = process.communicate()
+  assert process.returncode == 0
+  return process_symbolizer_output(output), executable
+
+
+def merge_covered_line_results(data, results):
+  """Merge multiprocessing results for covered lines.
+
+  The data is mutated, the results are merged into it in place.
+
+  Args:
+    data: Existing coverage data from json file containing all instrumented
+          lines.
+    results: List of results as returned by get_covered_lines.
+  """
+
+  # List of executables and mapping to the test bit mask. The number of
+  # tests is restricted to 52, to allow JavaScript JSON parsing of
+  # the bit masks encoded as numbers. JS's max safe integer is 2^53 - 1.
+  exe_list = data['tests']
+  assert len(exe_list) <= 52, 'Max 52 different tests are supported.'
+  test_bit_masks = {exe: 1 << i for i, exe in enumerate(exe_list)}
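+  # E.g. tests ['a', 'b', 'c'] (names illustrative) yield masks
+  # {'a': 1, 'b': 2, 'c': 4}.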
+
+  def merge_lines(old_lines, new_lines, mask):
+    """Merge the coverage data of a list of lines.
+
+    Args:
+      old_lines: Lines as list of pairs with line number and test bit mask.
+                 The new lines will be merged into the list in place.
+      new_lines: List of new (covered) lines (sorted).
+      mask: The bit to be set for covered lines. The bit index is the test
+            index of the executable that covered the line.
+    """
+    i = 0
+    # Iterate over old and new lines, both are sorted.
+    for l in new_lines:
+      while old_lines[i][0] < l:
+        # Forward instrumented lines not present in this coverage data.
+        i += 1
+        # TODO: Add more context to the assert message.
+        assert i < len(old_lines), 'Covered line %d not in input file.' % l
+      assert old_lines[i][0] == l, 'Covered line %d not in input file.' % l
+
+      # Add coverage information to the line.
+      old_lines[i][1] |= mask
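+
+  # E.g. (hypothetical data): merging new_lines [3] with mask 2 into
+  # old_lines [[1, 0], [3, 1]] mutates old_lines to [[1, 0], [3, 3]].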
+
+  def merge_files(data, result):
+    """Merge result into data.
+
+    The data is mutated in place.
+
+    Args:
+      data: Merged coverage data from the previous reduce step.
+      result: New result to be merged in. The type is as returned by
+              get_covered_lines.
+    """
+    file_map, executable = result
+    files = data['files']
+    for file_name, lines in file_map.iteritems():
+      merge_lines(files[file_name], lines, test_bit_masks[executable])
+    return data
+
+  reduce(merge_files, results, data)
+
+
+def merge(options):
+  """Implements the 'merge' action of this tool."""
+
+  # Check if folder with coverage output exists.
+  assert (os.path.exists(options.coverage_dir) and
+          os.path.isdir(options.coverage_dir))
+
+  # Inputs for multiprocessing. List of tuples of:
+  # Coverage dir, executable name, sancov file name.
+  inputs = []
+  for f in os.listdir(options.coverage_dir):
+    match = SANCOV_FILE_RE.match(f)
+    if match:
+      inputs.append((options.coverage_dir, match.group(1), f))
+
+  logging.info('Merging %d sancov files into %s',
+               len(inputs), options.json_input)
+
+  # Post-process covered lines in parallel.
+  pool = Pool(CPUS)
+  try:
+    results = pool.imap_unordered(get_covered_lines, inputs)
+  finally:
+    pool.close()
+
+  # Load existing json data file for merging the results.
+  with open(options.json_input, 'r') as f:
+    data = json.load(f)
+
+  # Merge multiprocessing results. Mutates data.
+  merge_covered_line_results(data, results)
+
+  logging.info('Merged data from %d executables, which covers %d files.',
+               len(data['tests']), len(data['files']))
+  logging.info('Writing results to %s', options.json_output)
+
+  # Write merged results to file.
+  with open(options.json_output, 'w') as f:
+    json.dump(data, f, sort_keys=True)
+
+
+def split(options):
+  """Implements the 'split' action of this tool."""
+  # Load existing json data file for splitting.
+  with open(options.json_input, 'r') as f:
+    data = json.load(f)
+
+  logging.info('Splitting off %d coverage files from %s',
+               len(data['files']), options.json_input)
+
+  for file_name, coverage in data['files'].iteritems():
+    # Preserve relative directories that are part of the file name.
+    file_path = os.path.join(options.output_dir, file_name + '.json')
+    try:
+      os.makedirs(os.path.dirname(file_path))
+    except OSError:
+      # Ignore existing directories.
+      pass
+
+    with open(file_path, 'w') as f:
+      # Shallow-copy the old dict.
+      new_data = dict(data)
+
+      # Update current file.
+      new_data['files'] = {file_name: coverage}
+
+      # Write json data.
+      json.dump(new_data, f, sort_keys=True)
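+
+# Example invocation of the split action (a sketch; paths are hypothetical).
+# For input data {'files': {'src/foo.cc': ...}} this writes
+# <output-dir>/src/foo.cc.json containing just that file's entry:
+#   python tools/sanitizers/sancov_formatter.py split \
+#       --json-input=coverage_merged.json --output-dir=coverage_by_file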
+
+
+def main(args=None):
+  parser = argparse.ArgumentParser()
+  parser.add_argument('--coverage-dir',
+                      help='Path to the sancov output files.')
+  parser.add_argument('--json-input',
+                      help='Path to an existing json file with coverage data.')
+  parser.add_argument('--json-output',
+                      help='Path to a file to write json output to.')
+  parser.add_argument('--output-dir',
+                      help='Directory to write the split output files to.')
+  parser.add_argument('action', choices=['all', 'merge', 'split'],
+                      help='Action to perform.')
+
+  options = parser.parse_args(args)
+  if options.action.lower() == 'all':
+    if not options.json_output:
+      print '--json-output is required'
+      return 1
+    write_instrumented(options)
+  elif options.action.lower() == 'merge':
+    if not options.coverage_dir:
+      print '--coverage-dir is required'
+      return 1
+    if not options.json_input:
+      print '--json-input is required'
+      return 1
+    if not options.json_output:
+      print '--json-output is required'
+      return 1
+    merge(options)
+  elif options.action.lower() == 'split':
+    if not options.json_input:
+      print '--json-input is required'
+      return 1
+    if not options.output_dir:
+      print '--output-dir is required'
+      return 1
+    split(options)
+  return 0
+
+
+if __name__ == '__main__':
+  sys.exit(main())
diff --git a/tools/sanitizers/sancov_formatter_test.py b/tools/sanitizers/sancov_formatter_test.py
new file mode 100644
index 0000000..6a741c8
--- /dev/null
+++ b/tools/sanitizers/sancov_formatter_test.py
@@ -0,0 +1,222 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Requires python-coverage. Native python coverage version >= 3.7.1 should
+# be installed to get the best speed.
+
+import copy
+import coverage
+import logging
+import json
+import os
+import shutil
+import sys
+import tempfile
+import unittest
+
+
+# Directory of this file.
+LOCATION = os.path.dirname(os.path.abspath(__file__))
+
+# V8 checkout directory.
+BASE_DIR = os.path.dirname(os.path.dirname(LOCATION))
+
+# Executable location.
+BUILD_DIR = os.path.join(BASE_DIR, 'out', 'Release')
+
+def abs_line(line):
+  """Absolute paths as output by the llvm symbolizer."""
+  return '%s/%s' % (BUILD_DIR, line)
+
+
+#------------------------------------------------------------------------------
+
+# Data for test_process_symbolizer_output. This simulates output from the
+# llvm symbolizer. The paths are not normalized.
+SYMBOLIZER_OUTPUT = (
+  abs_line('../../src/foo.cc:87:7\n') +
+  abs_line('../../src/foo.cc:92:0\n') + # Test sorting.
+  abs_line('../../src/baz/bar.h:1234567:0\n') + # Test large line numbers.
+  abs_line('../../src/foo.cc:92:0\n') + # Test duplicates.
+  abs_line('../../src/baz/bar.h:0:0\n') + # Test subdirs.
+  '/usr/include/cool_stuff.h:14:2\n' + # Test dropping absolute paths.
+  abs_line('../../src/foo.cc:87:10\n') + # Test dropping character indexes.
+  abs_line('../../third_party/icu.cc:0:0\n') + # Test dropping excluded dirs.
+  abs_line('../../src/baz/bar.h:11:0\n')
+)
+
+# The expected post-processed output maps relative file names to line numbers.
+# The numbers are sorted and unique.
+EXPECTED_PROCESSED_OUTPUT = {
+  'src/baz/bar.h': [0, 11, 1234567],
+  'src/foo.cc': [87, 92],
+}
+
+
+#------------------------------------------------------------------------------
+
+# Data for test_merge_instrumented_line_results. A list of absolute paths to
+# all executables.
+EXE_LIST = [
+  '/path/to/d8',
+  '/path/to/cctest',
+  '/path/to/unittests',
+]
+
+# Post-processed llvm symbolizer output as returned by
+# process_symbolizer_output. This is a list of such outputs for merging.
+INSTRUMENTED_LINE_RESULTS = [
+  {
+    'src/baz/bar.h': [0, 3, 7],
+    'src/foo.cc': [11],
+  },
+  {
+    'src/baz/bar.h': [3, 7, 8],
+    'src/baz.cc': [2],
+    'src/foo.cc': [1, 92],
+  },
+  {
+    'src/baz.cc': [1],
+    'src/foo.cc': [92, 93],
+  },
+]
+
+# This shows initial instrumentation. No lines are covered, hence,
+# the coverage mask is 0 for all lines. The line tuples remain sorted by
+# line number and contain no duplicates.
+EXPECTED_INSTRUMENTED_LINES_DATA = {
+  'version': 1,
+  'tests': ['cctest', 'd8', 'unittests'],
+  'files': {
+    'src/baz/bar.h': [[0, 0], [3, 0], [7, 0], [8, 0]],
+    'src/baz.cc': [[1, 0], [2, 0]],
+    'src/foo.cc': [[1, 0], [11, 0], [92, 0], [93, 0]],
+  },
+}
+
+
+#------------------------------------------------------------------------------
+
+# Data for test_merge_covered_line_results. List of post-processed
+# llvm-symbolizer outputs, each paired with the name of the executable that
+# produced the data set.
+COVERED_LINE_RESULTS = [
+  ({
+     'src/baz/bar.h': [3, 7],
+     'src/foo.cc': [11],
+   }, 'd8'),
+  ({
+     'src/baz/bar.h': [3, 7],
+     'src/baz.cc': [2],
+     'src/foo.cc': [1],
+   }, 'cctest'),
+  ({
+     'src/foo.cc': [92],
+     'src/baz.cc': [2],
+   }, 'unittests'),
+]
+
+# This shows initial instrumentation + coverage. The mask bits are:
+# cctest: 1, d8: 2, unittests: 4. So a line covered by cctest and unittests
+# has a coverage mask of 0b101, e.g. line 2 in src/baz.cc.
+EXPECTED_COVERED_LINES_DATA = {
+  'version': 1,
+  'tests': ['cctest', 'd8', 'unittests'],
+  'files': {
+    'src/baz/bar.h': [[0, 0b0], [3, 0b11], [7, 0b11], [8, 0b0]],
+    'src/baz.cc': [[1, 0b0], [2, 0b101]],
+    'src/foo.cc': [[1, 0b1], [11, 0b10], [92, 0b100], [93, 0b0]],
+  },
+}
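+
+# Sanity check of the masks above: cctest is bit 0 and unittests is bit 2 in
+# the sorted test list, so (1 << 0) | (1 << 2) == 0b101, as for line 2 of
+# src/baz.cc.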
+
+
+#------------------------------------------------------------------------------
+
+# Data for test_split.
+
+EXPECTED_SPLIT_FILES = [
+  (
+    os.path.join('src', 'baz', 'bar.h.json'),
+    {
+      'version': 1,
+      'tests': ['cctest', 'd8', 'unittests'],
+      'files': {
+        'src/baz/bar.h': [[0, 0b0], [3, 0b11], [7, 0b11], [8, 0b0]],
+      },
+    },
+  ),
+  (
+    os.path.join('src', 'baz.cc.json'),
+    {
+      'version': 1,
+      'tests': ['cctest', 'd8', 'unittests'],
+      'files': {
+        'src/baz.cc': [[1, 0b0], [2, 0b101]],
+      },
+    },
+  ),
+  (
+    os.path.join('src', 'foo.cc.json'),
+    {
+      'version': 1,
+      'tests': ['cctest', 'd8', 'unittests'],
+      'files': {
+        'src/foo.cc': [[1, 0b1], [11, 0b10], [92, 0b100], [93, 0b0]],
+      },
+    },
+  ),
+]
+
+
+class FormatterTests(unittest.TestCase):
+  @classmethod
+  def setUpClass(cls):
+    sys.path.append(LOCATION)
+    cls._cov = coverage.coverage(
+        include=([os.path.join(LOCATION, 'sancov_formatter.py')]))
+    cls._cov.start()
+    import sancov_formatter
+    global sancov_formatter
+
+  @classmethod
+  def tearDownClass(cls):
+    cls._cov.stop()
+    cls._cov.report()
+
+  def test_process_symbolizer_output(self):
+    result = sancov_formatter.process_symbolizer_output(SYMBOLIZER_OUTPUT)
+    self.assertEquals(EXPECTED_PROCESSED_OUTPUT, result)
+
+  def test_merge_instrumented_line_results(self):
+    result = sancov_formatter.merge_instrumented_line_results(
+      EXE_LIST, INSTRUMENTED_LINE_RESULTS)
+    self.assertEquals(EXPECTED_INSTRUMENTED_LINES_DATA, result)
+
+  def test_merge_covered_line_results(self):
+    data = copy.deepcopy(EXPECTED_INSTRUMENTED_LINES_DATA)
+    sancov_formatter.merge_covered_line_results(
+      data, COVERED_LINE_RESULTS)
+    self.assertEquals(EXPECTED_COVERED_LINES_DATA, data)
+
+  def test_split(self):
+    _, json_input = tempfile.mkstemp(prefix='tmp_coverage_test_split')
+    with open(json_input, 'w') as f:
+      json.dump(EXPECTED_COVERED_LINES_DATA, f)
+    output_dir = tempfile.mkdtemp(prefix='tmp_coverage_test_split')
+
+    try:
+      sancov_formatter.main([
+        'split',
+        '--json-input', json_input,
+        '--output-dir', output_dir,
+      ])
+
+      for file_name, expected_data in EXPECTED_SPLIT_FILES:
+        full_path = os.path.join(output_dir, file_name)
+        self.assertTrue(os.path.exists(full_path))
+        with open(full_path) as f:
+          self.assertEquals(expected_data, json.load(f))
+    finally:
+      os.remove(json_input)
+      shutil.rmtree(output_dir)
diff --git a/tools/sanitizers/sancov_merger.py b/tools/sanitizers/sancov_merger.py
new file mode 100755
index 0000000..a4cfec1
--- /dev/null
+++ b/tools/sanitizers/sancov_merger.py
@@ -0,0 +1,229 @@
+#!/usr/bin/env python
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Script for merging sancov files in parallel.
+
+When merging test runner output, the sancov files are expected
+to be located in one directory with the file-name pattern:
+<executable name>.test.<id>.sancov
+
+For each executable, this script writes a new file:
+<executable name>.result.sancov
+
+When --swarming-output-dir is specified, this script will merge the result
+files found there into the coverage folder.
+
+The sancov tool is expected to be in the llvm compiler-rt third-party
+directory. It is not checked out by default and must be added via a
+custom_deps entry:
+'v8/third_party/llvm/projects/compiler-rt':
+    'https://chromium.googlesource.com/external/llvm.org/compiler-rt.git'
+"""
+
+import argparse
+import logging
+import math
+import os
+import re
+import subprocess
+import sys
+
+from multiprocessing import Pool, cpu_count
+
+
+logging.basicConfig(level=logging.INFO)
+
+# V8 checkout directory.
+BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(
+    os.path.abspath(__file__))))
+
+# The sancov tool location.
+SANCOV_TOOL = os.path.join(
+    BASE_DIR, 'third_party', 'llvm', 'projects', 'compiler-rt',
+    'lib', 'sanitizer_common', 'scripts', 'sancov.py')
+
+# Number of cpus.
+CPUS = cpu_count()
+
+# Regexp to find sancov file as output by the v8 test runner. Also grabs the
+# executable name in group 1.
+SANCOV_FILE_RE = re.compile(r'^(.*)\.test\.\d+\.sancov$')
+
+# Regexp to find sancov result files as returned from swarming.
+SANCOV_RESULTS_FILE_RE = re.compile(r'^.*\.result\.sancov$')
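+
+# For illustration (hypothetical file names): 'd8.test.42.sancov' matches
+# SANCOV_FILE_RE with group(1) == 'd8', while 'd8.result.sancov' matches
+# only SANCOV_RESULTS_FILE_RE.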
+
+
+def merge(args):
+  """Merge several sancov files into one.
+
+  Called through the multiprocessing pool. The args are expected to unpack to:
+    keep: Whether the source and intermediate sancov files should be kept.
+    coverage_dir: Folder where to find the sancov files.
+    executable: Name of the executable whose sancov files should be merged.
+    index: A number to be put into the intermediate result file name.
+           If None, this is a final result.
+    bucket: The list of sancov files to be merged.
+  Returns: A tuple with the executable name and the result file name.
+  """
+  keep, coverage_dir, executable, index, bucket = args
+  process = subprocess.Popen(
+      [SANCOV_TOOL, 'merge'] + bucket,
+      stdout=subprocess.PIPE,
+      stderr=subprocess.PIPE,
+      cwd=coverage_dir,
+  )
+  output, _ = process.communicate()
+  assert process.returncode == 0
+  if index is not None:
+    # This is an intermediate result, add the bucket index to the file name.
+    result_file_name = '%s.result.%d.sancov' % (executable, index)
+  else:
+    # This is the final result without bucket index.
+    result_file_name = '%s.result.sancov' % executable
+  with open(os.path.join(coverage_dir, result_file_name), "wb") as f:
+    f.write(output)
+  if not keep:
+    for f in bucket:
+      os.remove(os.path.join(coverage_dir, f))
+  return executable, result_file_name
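+
+# A hypothetical merge job as a sketch:
+#   merge((False, '/tmp/coverage', 'd8', 0,
+#          ['d8.test.1.sancov', 'd8.test.2.sancov']))
+# runs the sancov tool on the two files, deletes them (keep=False) and
+# returns ('d8', 'd8.result.0.sancov').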
+
+
+def generate_inputs(keep, coverage_dir, file_map, cpus):
+  """Generate inputs for multiprocessed merging.
+
+  Splits the sancov files into several buckets, so that each bucket can be
+  merged in a separate process. We have only a few executables in total, each
+  with many associated files. In the general case, with many executables, we
+  might need to avoid splitting the buckets of executables that have few files.
+
+  Returns: List of args as expected by merge above.
+  """
+  inputs = []
+  for executable, files in file_map.iteritems():
+    # What's the bucket size for distributing files for merging? E.g. with
+    # 2 cpus and 9 files we want bucket size 5.
+    n = max(2, int(math.ceil(len(files) / float(cpus))))
+
+    # Chop files into buckets.
+    buckets = [files[i:i+n] for i in xrange(0, len(files), n)]
+
+    # Inputs for multiprocessing. List of tuples containing:
+    # Keep-files option, base path, executable name, index of bucket,
+    # list of files.
+    inputs.extend([(keep, coverage_dir, executable, i, b)
+                   for i, b in enumerate(buckets)])
+  return inputs
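+
+# Sketch of the bucket math above: with cpus=2 and 9 files, n equals
+# max(2, ceil(9 / 2.0)) == 5, so the files land in two buckets of 5 and 4;
+# compare EXPECTED_INPUTS_2 in sancov_merger_test.py.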
+
+
+def merge_parallel(inputs, merge_fun=merge):
+  """Process several merge jobs in parallel."""
+  pool = Pool(CPUS)
+  try:
+    return pool.map(merge_fun, inputs)
+  finally:
+    pool.close()
+
+
+def merge_test_runner_output(options):
+  # Map executable names to their respective sancov files.
+  file_map = {}
+  for f in os.listdir(options.coverage_dir):
+    match = SANCOV_FILE_RE.match(f)
+    if match:
+      file_map.setdefault(match.group(1), []).append(f)
+
+  inputs = generate_inputs(
+      options.keep, options.coverage_dir, file_map, CPUS)
+
+  logging.info('Executing %d merge jobs in parallel for %d executables.' %
+               (len(inputs), len(file_map)))
+
+  results = merge_parallel(inputs)
+
+  # Map executable names to intermediate bucket result files.
+  file_map = {}
+  for executable, f in results:
+    file_map.setdefault(executable, []).append(f)
+
+  # Merge the bucket results for each executable.
+  # The final result has index None, so no index will appear in the
+  # file name.
+  inputs = [(options.keep, options.coverage_dir, executable, None, files)
+             for executable, files in file_map.iteritems()]
+
+  logging.info('Merging %d intermediate results.' % len(inputs))
+
+  merge_parallel(inputs)
+
+
+def merge_two(args):
+  """Merge two sancov files.
+
+  Called through the multiprocessing pool. The args are expected to unpack to:
+    swarming_output_dir: Folder where to find the new file.
+    coverage_dir: Folder where to find the existing file.
+    f: File name of the file to be merged.
+  """
+  swarming_output_dir, coverage_dir, f = args
+  input_file = os.path.join(swarming_output_dir, f)
+  output_file = os.path.join(coverage_dir, f)
+  process = subprocess.Popen(
+      [SANCOV_TOOL, 'merge', input_file, output_file],
+      stdout=subprocess.PIPE,
+      stderr=subprocess.PIPE,
+  )
+  output, _ = process.communicate()
+  assert process.returncode == 0
+  with open(output_file, "wb") as f:
+    f.write(output)
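+
+# A hypothetical merge_two job as a sketch:
+#   merge_two(('/tmp/swarming_out', '/tmp/coverage', 'd8.result.sancov'))
+# merges the swarming shard's copy into /tmp/coverage/d8.result.sancov.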
+
+
+def merge_swarming_output(options):
+  # Iterate sancov files from swarming.
+  files = []
+  for f in os.listdir(options.swarming_output_dir):
+    match = SANCOV_RESULTS_FILE_RE.match(f)
+    if match:
+      if os.path.exists(os.path.join(options.coverage_dir, f)):
+        # If the same file already exists, we'll merge the data.
+        files.append(f)
+      else:
+        # No file yet? Just move it.
+        os.rename(os.path.join(options.swarming_output_dir, f),
+                  os.path.join(options.coverage_dir, f))
+
+  inputs = [(options.swarming_output_dir, options.coverage_dir, f)
+            for f in files]
+
+  logging.info('Executing %d merge jobs in parallel.' % len(inputs))
+  merge_parallel(inputs, merge_two)
+
+
+def main():
+  parser = argparse.ArgumentParser()
+  parser.add_argument('--coverage-dir', required=True,
+                      help='Path to the sancov output files.')
+  parser.add_argument('--keep', default=False, action='store_true',
+                      help='Keep sancov output files after merging.')
+  parser.add_argument('--swarming-output-dir',
+                      help='Folder containing a results shard from swarming.')
+  options = parser.parse_args()
+
+  # Check if folder with coverage output exists.
+  assert (os.path.exists(options.coverage_dir) and
+          os.path.isdir(options.coverage_dir))
+
+  if options.swarming_output_dir:
+    # Check if folder with swarming output exists.
+    assert (os.path.exists(options.swarming_output_dir) and
+            os.path.isdir(options.swarming_output_dir))
+    merge_swarming_output(options)
+  else:
+    merge_test_runner_output(options)
+
+  return 0
+
+
+if __name__ == '__main__':
+  sys.exit(main())
diff --git a/tools/sanitizers/sancov_merger_test.py b/tools/sanitizers/sancov_merger_test.py
new file mode 100644
index 0000000..93b89eb
--- /dev/null
+++ b/tools/sanitizers/sancov_merger_test.py
@@ -0,0 +1,82 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+import sancov_merger
+
+
+# Files on disk after test runner completes. The files are mapped by
+# executable name -> file list.
+FILE_MAP = {
+  'd8': [
+    'd8.test.1.sancov',
+    'd8.test.2.sancov',
+    'd8.test.3.sancov',
+    'd8.test.4.sancov',
+    'd8.test.5.sancov',
+    'd8.test.6.sancov',
+    'd8.test.7.sancov',
+  ],
+  'cctest': [
+    'cctest.test.1.sancov',
+    'cctest.test.2.sancov',
+    'cctest.test.3.sancov',
+    'cctest.test.4.sancov',
+  ],
+}
+
+
+# Inputs for merge process with 2 cpus. The tuples contain:
+# (flag, path, executable name, intermediate result index, file list).
+EXPECTED_INPUTS_2 = [
+  (False, '/some/path', 'cctest', 0, [
+    'cctest.test.1.sancov',
+    'cctest.test.2.sancov']),
+  (False, '/some/path', 'cctest', 1, [
+    'cctest.test.3.sancov',
+    'cctest.test.4.sancov']),
+  (False, '/some/path', 'd8', 0, [
+    'd8.test.1.sancov',
+    'd8.test.2.sancov',
+    'd8.test.3.sancov',
+    'd8.test.4.sancov']),
+  (False, '/some/path', 'd8', 1, [
+    'd8.test.5.sancov',
+    'd8.test.6.sancov',
+    'd8.test.7.sancov']),
+]
+
+
+# The same for 4 cpus.
+EXPECTED_INPUTS_4 = [
+  (True, '/some/path', 'cctest', 0, [
+    'cctest.test.1.sancov',
+    'cctest.test.2.sancov']),
+  (True, '/some/path', 'cctest', 1, [
+    'cctest.test.3.sancov',
+    'cctest.test.4.sancov']),
+  (True, '/some/path', 'd8', 0, [
+    'd8.test.1.sancov',
+    'd8.test.2.sancov']),
+  (True, '/some/path', 'd8', 1, [
+    'd8.test.3.sancov',
+    'd8.test.4.sancov']),
+  (True, '/some/path', 'd8', 2, [
+    'd8.test.5.sancov',
+    'd8.test.6.sancov']),
+  (True, '/some/path', 'd8', 3, [
+    'd8.test.7.sancov'])]
+
+
+class MergerTests(unittest.TestCase):
+  def test_generate_inputs_2_cpu(self):
+    inputs = sancov_merger.generate_inputs(
+        False, '/some/path', FILE_MAP, 2)
+    self.assertEquals(EXPECTED_INPUTS_2, inputs)
+
+  def test_generate_inputs_4_cpu(self):
+    inputs = sancov_merger.generate_inputs(
+        True, '/some/path', FILE_MAP, 4)
+    self.assertEquals(EXPECTED_INPUTS_4, inputs)
diff --git a/tools/sanitizers/sanitize_pcs.py b/tools/sanitizers/sanitize_pcs.py
new file mode 100755
index 0000000..47f2715
--- /dev/null
+++ b/tools/sanitizers/sanitize_pcs.py
@@ -0,0 +1,11 @@
+#!/usr/bin/env python
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Corrects objdump output. The logic is from sancov.py, see comments there."""
+
+import sys
+
+for line in sys.stdin:
+  print '0x%x' % (int(line.strip(), 16) + 4)
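+
+# Example (hypothetical input): echo 0x4005d0 | sanitize_pcs.py
+# prints 0x4005d4, i.e. every program counter is shifted by 4 bytes.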
diff --git a/tools/testrunner/local/commands.py b/tools/testrunner/local/commands.py
index a4df32c..e725d11 100644
--- a/tools/testrunner/local/commands.py
+++ b/tools/testrunner/local/commands.py
@@ -107,14 +107,16 @@
   timer.start()
   stdout, stderr = process.communicate()
   timer.cancel()
-  return process.returncode, timeout_result[0], stdout, stderr
+
+  return output.Output(
+      process.returncode,
+      timeout_result[0],
+      stdout,
+      stderr,
+      process.pid,
+  )
 
 
 def Execute(args, verbose=False, timeout=None):
   args = [ c for c in args if c != "" ]
-  exit_code, timed_out, stdout, stderr = RunProcess(
-    verbose,
-    timeout,
-    args=args,
-  )
-  return output.Output(exit_code, timed_out, stdout, stderr)
+  return RunProcess(verbose, timeout, args=args)
diff --git a/tools/testrunner/local/execution.py b/tools/testrunner/local/execution.py
index 0d90ab8..e0aec0b 100644
--- a/tools/testrunner/local/execution.py
+++ b/tools/testrunner/local/execution.py
@@ -49,9 +49,8 @@
 
 
 class Instructions(object):
-  def __init__(self, command, dep_command, test_id, timeout, verbose):
+  def __init__(self, command, test_id, timeout, verbose):
     self.command = command
-    self.dep_command = dep_command
     self.id = test_id
     self.timeout = timeout
     self.verbose = verbose
@@ -112,12 +111,7 @@
   # the like.
   if statusfile.IsSlow(test.outcomes or [statusfile.PASS]):
     timeout *= 2
-  if test.dependency is not None:
-    dep_command = [ c.replace(test.path, test.dependency) for c in command ]
-  else:
-    dep_command = None
-  return Instructions(
-      command, dep_command, test.id, timeout, context.verbose)
+  return Instructions(command, test.id, timeout, context.verbose)
 
 
 class Job(object):
@@ -143,13 +137,33 @@
     # Extra debugging information when files are claimed missing.
     f = match.group(1)
     stderr += ">>> File %s exists? -> %s\n" % (f, os.path.exists(f))
-  return test.id, output.Output(1, False, "", stderr), 0
+  return test.id, output.Output(1, False, "", stderr, None), 0
 
 
 class TestJob(Job):
   def __init__(self, test):
     self.test = test
 
+  def _rename_coverage_data(self, output, context):
+    """Rename coverage data.
+
+    Rename files with PIDs to files with unique test IDs, because the number
+    of tests might be higher than pid_max. E.g.:
+    d8.1234.sancov -> d8.test.1.sancov, where 1234 was the process' PID
+    and 1 is the test ID.
+    """
+    if context.sancov_dir and output.pid is not None:
+      sancov_file = os.path.join(
+          context.sancov_dir, "%s.%d.sancov" % (self.test.shell(), output.pid))
+
+      # Some tests are expected to fail and don't produce coverage data.
+      if os.path.exists(sancov_file):
+        parts = sancov_file.split(".")
+        new_sancov_file = ".".join(
+            parts[:-2] + ["test", str(self.test.id)] + parts[-1:])
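+        # E.g. (hypothetical values) parts == ['out/cov/d8', '1234', 'sancov']
+        # and test id 7 yield 'out/cov/d8.test.7.sancov'.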
+        assert not os.path.exists(new_sancov_file)
+        os.rename(sancov_file, new_sancov_file)
+
   def Run(self, process_context):
     try:
       # Retrieve a new suite object on the worker-process side. The original
@@ -160,16 +174,8 @@
       return SetupProblem(e, self.test)
 
     start_time = time.time()
-    if instr.dep_command is not None:
-      dep_output = commands.Execute(
-          instr.dep_command, instr.verbose, instr.timeout)
-      # TODO(jkummerow): We approximate the test suite specific function
-      # IsFailureOutput() by just checking the exit code here. Currently
-      # only cctests define dependencies, for which this simplification is
-      # correct.
-      if dep_output.exit_code != 0:
-        return (instr.id, dep_output, time.time() - start_time)
     output = commands.Execute(instr.command, instr.verbose, instr.timeout)
+    self._rename_coverage_data(output, process_context.context)
     return (instr.id, output, time.time() - start_time)
 
 
diff --git a/tools/testrunner/local/statusfile.py b/tools/testrunner/local/statusfile.py
index f86106b..7e96cc3 100644
--- a/tools/testrunner/local/statusfile.py
+++ b/tools/testrunner/local/statusfile.py
@@ -35,7 +35,6 @@
 TIMEOUT = "TIMEOUT"
 CRASH = "CRASH"
 SLOW = "SLOW"
-FLAKY = "FLAKY"
 FAST_VARIANTS = "FAST_VARIANTS"
 NO_VARIANTS = "NO_VARIANTS"
 # These are just for the status files and are mapped below in DEFS:
@@ -46,7 +45,7 @@
 ALWAYS = "ALWAYS"
 
 KEYWORDS = {}
-for key in [SKIP, FAIL, PASS, OKAY, TIMEOUT, CRASH, SLOW, FLAKY, FAIL_OK,
+for key in [SKIP, FAIL, PASS, OKAY, TIMEOUT, CRASH, SLOW, FAIL_OK,
             FAST_VARIANTS, NO_VARIANTS, PASS_OR_FAIL, FAIL_SLOPPY, ALWAYS]:
   KEYWORDS[key] = key
 
@@ -59,7 +58,7 @@
             "android_arm", "android_arm64", "android_ia32", "android_x87",
             "android_x64", "arm", "arm64", "ia32", "mips", "mipsel", "mips64",
             "mips64el", "x64", "x87", "nacl_ia32", "nacl_x64", "ppc", "ppc64",
-            "macos", "windows", "linux", "aix"]:
+            "s390", "s390x", "macos", "windows", "linux", "aix"]:
   VARIABLES[var] = var
 
 
@@ -79,10 +78,6 @@
   return FAST_VARIANTS in outcomes
 
 
-def IsFlaky(outcomes):
-  return FLAKY in outcomes
-
-
 def IsPassOrFail(outcomes):
   return ((PASS in outcomes) and (FAIL in outcomes) and
           (not CRASH in outcomes) and (not OKAY in outcomes))
diff --git a/tools/testrunner/local/testsuite.py b/tools/testrunner/local/testsuite.py
index 55e0eb2..f43d008 100644
--- a/tools/testrunner/local/testsuite.py
+++ b/tools/testrunner/local/testsuite.py
@@ -102,7 +102,6 @@
 
   def __init__(self, name, root):
     # Note: This might be called concurrently from different processes.
-    # Changing harddisk state should be done in 'SetupWorkingDirectory' below.
     self.name = name  # string
     self.root = root  # string containing path
     self.tests = None  # list of TestCase objects
@@ -110,11 +109,6 @@
     self.wildcards = None  # dictionary mapping test paths to list of outcomes
     self.total_duration = None  # float, assigned on demand
 
-  def SetupWorkingDirectory(self):
-    # This is called once per test suite object in a multi-process setting.
-    # Multi-process-unsafe work-directory setup can go here.
-    pass
-
   def shell(self):
     return "d8"
 
@@ -159,10 +153,6 @@
     self.tests = self.ListTests(context)
 
   @staticmethod
-  def _FilterFlaky(flaky, mode):
-    return (mode == "run" and not flaky) or (mode == "skip" and flaky)
-
-  @staticmethod
   def _FilterSlow(slow, mode):
     return (mode == "run" and not slow) or (mode == "skip" and slow)
 
@@ -171,13 +161,11 @@
     return (mode == "run" and not pass_fail) or (mode == "skip" and pass_fail)
 
   def FilterTestCasesByStatus(self, warn_unused_rules,
-                              flaky_tests="dontcare",
                               slow_tests="dontcare",
                               pass_fail_tests="dontcare"):
     filtered = []
     used_rules = set()
     for t in self.tests:
-      flaky = False
       slow = False
       pass_fail = False
       testname = self.CommonTestName(t)
@@ -191,7 +179,6 @@
         for outcome in t.outcomes:
           if outcome.startswith('Flags: '):
             t.flags += outcome[7:].split()
-        flaky = statusfile.IsFlaky(t.outcomes)
         slow = statusfile.IsSlow(t.outcomes)
         pass_fail = statusfile.IsPassOrFail(t.outcomes)
       skip = False
@@ -203,10 +190,9 @@
           if statusfile.DoSkip(t.outcomes):
             skip = True
             break  # "for rule in self.wildcards"
-          flaky = flaky or statusfile.IsFlaky(t.outcomes)
           slow = slow or statusfile.IsSlow(t.outcomes)
           pass_fail = pass_fail or statusfile.IsPassOrFail(t.outcomes)
-      if (skip or self._FilterFlaky(flaky, flaky_tests)
+      if (skip
           or self._FilterSlow(slow, slow_tests)
           or self._FilterPassFail(pass_fail, pass_fail_tests)):
         continue  # "for t in self.tests"
@@ -262,14 +248,14 @@
   def GetSourceForTest(self, testcase):
     return "(no source available)"
 
-  def IsFailureOutput(self, output, testpath):
-    return output.exit_code != 0
+  def IsFailureOutput(self, testcase):
+    return testcase.output.exit_code != 0
 
   def IsNegativeTest(self, testcase):
     return False
 
   def HasFailed(self, testcase):
-    execution_failed = self.IsFailureOutput(testcase.output, testcase.path)
+    execution_failed = self.IsFailureOutput(testcase)
     if self.IsNegativeTest(testcase):
       return not execution_failed
     else:
@@ -328,9 +314,9 @@
       if test_desc.endswith('.'):
         test_case = test_desc
       elif test_case and test_desc:
-        test = testcase.TestCase(self, test_case + test_desc, dependency=None)
+        test = testcase.TestCase(self, test_case + test_desc)
         tests.append(test)
-    tests.sort()
+    tests.sort(key=lambda t: t.path)
     return tests
 
   def GetFlagsForTestCase(self, testcase, context):
diff --git a/tools/testrunner/local/utils.py b/tools/testrunner/local/utils.py
index cb6c350..c880dfc 100644
--- a/tools/testrunner/local/utils.py
+++ b/tools/testrunner/local/utils.py
@@ -102,6 +102,8 @@
     return 'ia32'
   elif machine == 'amd64':
     return 'ia32'
+  elif machine == 's390x':
+    return 's390'
   elif machine == 'ppc64':
     return 'ppc'
   else:
diff --git a/tools/testrunner/objects/context.py b/tools/testrunner/objects/context.py
index c9853d0..6bcbfb6 100644
--- a/tools/testrunner/objects/context.py
+++ b/tools/testrunner/objects/context.py
@@ -30,7 +30,7 @@
   def __init__(self, arch, mode, shell_dir, mode_flags, verbose, timeout,
                isolates, command_prefix, extra_flags, noi18n, random_seed,
                no_sorting, rerun_failures_count, rerun_failures_max,
-               predictable, no_harness, use_perf_data):
+               predictable, no_harness, use_perf_data, sancov_dir):
     self.arch = arch
     self.mode = mode
     self.shell_dir = shell_dir
@@ -48,13 +48,14 @@
     self.predictable = predictable
     self.no_harness = no_harness
     self.use_perf_data = use_perf_data
+    self.sancov_dir = sancov_dir
 
   def Pack(self):
     return [self.arch, self.mode, self.mode_flags, self.timeout, self.isolates,
             self.command_prefix, self.extra_flags, self.noi18n,
             self.random_seed, self.no_sorting, self.rerun_failures_count,
             self.rerun_failures_max, self.predictable, self.no_harness,
-            self.use_perf_data]
+            self.use_perf_data, self.sancov_dir]
 
   @staticmethod
   def Unpack(packed):
@@ -62,4 +63,4 @@
     return Context(packed[0], packed[1], None, packed[2], False,
                    packed[3], packed[4], packed[5], packed[6], packed[7],
                    packed[8], packed[9], packed[10], packed[11], packed[12],
-                   packed[13], packed[14])
+                   packed[13], packed[14], packed[15])
diff --git a/tools/testrunner/objects/output.py b/tools/testrunner/objects/output.py
index 87b4c84..b4bb01f 100644
--- a/tools/testrunner/objects/output.py
+++ b/tools/testrunner/objects/output.py
@@ -32,11 +32,12 @@
 
 class Output(object):
 
-  def __init__(self, exit_code, timed_out, stdout, stderr):
+  def __init__(self, exit_code, timed_out, stdout, stderr, pid):
     self.exit_code = exit_code
     self.timed_out = timed_out
     self.stdout = stdout
     self.stderr = stderr
+    self.pid = pid
 
   def HasCrashed(self):
     if utils.IsWindows():
@@ -52,9 +53,9 @@
     return self.timed_out
 
   def Pack(self):
-    return [self.exit_code, self.timed_out, self.stdout, self.stderr]
+    return [self.exit_code, self.timed_out, self.stdout, self.stderr, self.pid]
 
   @staticmethod
   def Unpack(packed):
     # For the order of the fields, refer to Pack() above.
-    return Output(packed[0], packed[1], packed[2], packed[3])
+    return Output(packed[0], packed[1], packed[2], packed[3], packed[4])
diff --git a/tools/testrunner/objects/testcase.py b/tools/testrunner/objects/testcase.py
index b91f8b4..113c624 100644
--- a/tools/testrunner/objects/testcase.py
+++ b/tools/testrunner/objects/testcase.py
@@ -30,12 +30,11 @@
 
 class TestCase(object):
   def __init__(self, suite, path, variant='default', flags=None,
-               dependency=None, override_shell=None):
+               override_shell=None):
     self.suite = suite        # TestSuite object
     self.path = path          # string, e.g. 'div-mod', 'test-api/foo'
     self.flags = flags or []  # list of strings, flags specific to this test
     self.variant = variant    # name of the used testing variant
-    self.dependency = dependency  # |path| for testcase that must be run first
     self.override_shell = override_shell
     self.outcomes = set([])
     self.output = None
@@ -45,7 +44,7 @@
 
   def CopyAddingFlags(self, variant, flags):
     copy = TestCase(self.suite, self.path, variant, self.flags + flags,
-                    self.dependency, self.override_shell)
+                    self.override_shell)
     copy.outcomes = self.outcomes
     return copy
 
@@ -56,16 +55,16 @@
     """
     assert self.id is not None
     return [self.suitename(), self.path, self.variant, self.flags,
-            self.dependency, self.override_shell, list(self.outcomes or []),
+            self.override_shell, list(self.outcomes or []),
             self.id]
 
   @staticmethod
   def UnpackTask(task):
     """Creates a new TestCase object based on packed task data."""
     # For the order of the fields, refer to PackTask() above.
-    test = TestCase(str(task[0]), task[1], task[2], task[3], task[4], task[5])
-    test.outcomes = set(task[6])
-    test.id = task[7]
+    test = TestCase(str(task[0]), task[1], task[2], task[3], task[4])
+    test.outcomes = set(task[5])
+    test.id = task[6]
     test.run = 1
     return test
 
@@ -101,3 +100,11 @@
     send the name only and retrieve a process-local suite later.
     """
     return dict(self.__dict__, suite=self.suite.name)
+
+  def __cmp__(self, other):
+    # Make sure that test cases are sorted correctly if sorted without
+    # key function. But using a key function is preferred for speed.
+    return cmp(
+        (self.suite.name, self.path, self.flags),
+        (other.suite.name, other.path, other.flags),
+    )
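+
+  # Key-function equivalent of the comparison above (a sketch; a key function
+  # is preferred for speed):
+  #   tests.sort(key=lambda t: (t.suite.name, t.path, t.flags))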
diff --git a/tools/testrunner/testrunner.isolate b/tools/testrunner/testrunner.isolate
index 669614b..1e8e9dc 100644
--- a/tools/testrunner/testrunner.isolate
+++ b/tools/testrunner/testrunner.isolate
@@ -11,4 +11,14 @@
       './'
     ],
   },
-}
\ No newline at end of file
+  'conditions': [
+    ['coverage==1 and sanitizer_coverage=="bb"', {
+      'variables': {
+        'files': [
+          '../sanitizers/sancov_merger.py',
+          '../../third_party/llvm/projects/compiler-rt/lib/sanitizer_common/scripts/sancov.py',
+        ],
+      },
+    }],
+  ],
+}
diff --git a/tools/tick-processor.html b/tools/tick-processor.html
index bc9f636..a785a6e 100644
--- a/tools/tick-processor.html
+++ b/tools/tick-processor.html
@@ -82,7 +82,6 @@
 function start_process() {
   ArgumentsProcessor.DEFAULTS = {
     logFileName: 'v8.log',
-    snapshotLogFileName: null,
     platform: 'unix',
     stateFilter: null,
     callGraphSize: 5,
@@ -98,8 +97,6 @@
     'mac': MacCppEntriesProvider
   };
 
-  var snapshotLogProcessor; // not used
-
   var tickProcessor = new TickProcessor(
     new (entriesProviders[ArgumentsProcessor.DEFAULTS.platform])(
         ArgumentsProcessor.DEFAULTS.nm,
@@ -107,8 +104,7 @@
     ArgumentsProcessor.DEFAULTS.separateIc,
     ArgumentsProcessor.DEFAULTS.callGraphSize,
     ArgumentsProcessor.DEFAULTS.ignoreUnknown,
-    ArgumentsProcessor.DEFAULTS.stateFilter,
-    snapshotLogProcessor);
+    ArgumentsProcessor.DEFAULTS.stateFilter);
 
   tickProcessor.processLogChunk(v8log_content);
   tickProcessor.printStatistics();
diff --git a/tools/tickprocessor-driver.js b/tools/tickprocessor-driver.js
index dc8a87d..3f2321f 100644
--- a/tools/tickprocessor-driver.js
+++ b/tools/tickprocessor-driver.js
@@ -61,18 +61,12 @@
   initSourceMapSupport();
   sourceMap = SourceMap.load(params.sourceMap);
 }
-var snapshotLogProcessor;
-if (params.snapshotLogFileName) {
-  snapshotLogProcessor = new SnapshotLogProcessor();
-  snapshotLogProcessor.processLogFile(params.snapshotLogFileName);
-}
 var tickProcessor = new TickProcessor(
   new (entriesProviders[params.platform])(params.nm, params.targetRootFS),
   params.separateIc,
   params.callGraphSize,
   params.ignoreUnknown,
   params.stateFilter,
-  snapshotLogProcessor,
   params.distortion,
   params.range,
   sourceMap,
diff --git a/tools/tickprocessor.js b/tools/tickprocessor.js
index 600d2ee..ba7401a 100644
--- a/tools/tickprocessor.js
+++ b/tools/tickprocessor.js
@@ -70,88 +70,12 @@
 }
 
 
-function SnapshotLogProcessor() {
-  LogReader.call(this, {
-      'code-creation': {
-          parsers: [null, parseInt, parseInt, parseInt, null, 'var-args'],
-          processor: this.processCodeCreation },
-      'code-move': { parsers: [parseInt, parseInt],
-          processor: this.processCodeMove },
-      'code-delete': { parsers: [parseInt],
-          processor: this.processCodeDelete },
-      'function-creation': null,
-      'function-move': null,
-      'function-delete': null,
-      'sfi-move': null,
-      'snapshot-pos': { parsers: [parseInt, parseInt],
-          processor: this.processSnapshotPosition }});
-
-  V8Profile.prototype.handleUnknownCode = function(operation, addr) {
-    var op = Profile.Operation;
-    switch (operation) {
-      case op.MOVE:
-        print('Snapshot: Code move event for unknown code: 0x' +
-              addr.toString(16));
-        break;
-      case op.DELETE:
-        print('Snapshot: Code delete event for unknown code: 0x' +
-              addr.toString(16));
-        break;
-    }
-  };
-
-  this.profile_ = new V8Profile();
-  this.serializedEntries_ = [];
-}
-inherits(SnapshotLogProcessor, LogReader);
-
-
-SnapshotLogProcessor.prototype.processCodeCreation = function(
-    type, kind, start, size, name, maybe_func) {
-  if (maybe_func.length) {
-    var funcAddr = parseInt(maybe_func[0]);
-    var state = parseState(maybe_func[1]);
-    this.profile_.addFuncCode(type, name, start, size, funcAddr, state);
-  } else {
-    this.profile_.addCode(type, name, start, size);
-  }
-};
-
-
-SnapshotLogProcessor.prototype.processCodeMove = function(from, to) {
-  this.profile_.moveCode(from, to);
-};
-
-
-SnapshotLogProcessor.prototype.processCodeDelete = function(start) {
-  this.profile_.deleteCode(start);
-};
-
-
-SnapshotLogProcessor.prototype.processSnapshotPosition = function(addr, pos) {
-  this.serializedEntries_[pos] = this.profile_.findEntry(addr);
-};
-
-
-SnapshotLogProcessor.prototype.processLogFile = function(fileName) {
-  var contents = readFile(fileName);
-  this.processLogChunk(contents);
-};
-
-
-SnapshotLogProcessor.prototype.getSerializedEntryName = function(pos) {
-  var entry = this.serializedEntries_[pos];
-  return entry ? entry.getRawName() : null;
-};
-
-
 function TickProcessor(
     cppEntriesProvider,
     separateIc,
     callGraphSize,
     ignoreUnknown,
     stateFilter,
-    snapshotLogProcessor,
     distortion,
     range,
     sourceMap,
@@ -170,8 +94,6 @@
           processor: this.processCodeDelete },
       'sfi-move': { parsers: [parseInt, parseInt],
           processor: this.processFunctionMove },
-      'snapshot-pos': { parsers: [parseInt, parseInt],
-          processor: this.processSnapshotPosition },
       'tick': {
           parsers: [parseInt, parseInt, parseInt,
                     parseInt, parseInt, 'var-args'],
@@ -202,7 +124,6 @@
   this.callGraphSize_ = callGraphSize;
   this.ignoreUnknown_ = ignoreUnknown;
   this.stateFilter_ = stateFilter;
-  this.snapshotLogProcessor_ = snapshotLogProcessor;
   this.sourceMap = sourceMap;
   this.deserializedEntriesNames_ = [];
   var ticks = this.ticks_ =
@@ -362,14 +283,6 @@
 };
 
 
-TickProcessor.prototype.processSnapshotPosition = function(addr, pos) {
-  if (this.snapshotLogProcessor_) {
-    this.deserializedEntriesNames_[addr] =
-      this.snapshotLogProcessor_.getSerializedEntryName(pos);
-  }
-};
-
-
 TickProcessor.prototype.includeTick = function(vmState) {
   return this.stateFilter_ == null || this.stateFilter_ == vmState;
 };
@@ -757,8 +670,11 @@
 MacCppEntriesProvider.prototype.loadSymbols = function(libName) {
   this.parsePos = 0;
   libName = this.targetRootFS + libName;
+
+  // It seems that on OS X, `nm` treats `-f` as a format option rather than
+  // the "flat" display flag, so we don't pass it.
   try {
-    this.symbols = [os.system(this.nmExec, ['-n', '-f', libName], -1, -1), ''];
+    this.symbols = [os.system(this.nmExec, ['-n', libName], -1, -1), ''];
   } catch (e) {
     // If the library cannot be found on this system let's not panic.
     this.symbols = '';
@@ -880,8 +796,6 @@
         'Specify the \'nm\' executable to use (e.g. --nm=/my_dir/nm)'],
     '--target': ['targetRootFS', '',
         'Specify the target root directory for cross environment'],
-    '--snapshot-log': ['snapshotLogFileName', 'snapshot.log',
-        'Specify snapshot log file to use (e.g. --snapshot-log=snapshot.log)'],
     '--range': ['range', 'auto,auto',
         'Specify the range limit as [start],[end]'],
     '--distortion': ['distortion', 0,
@@ -906,7 +820,6 @@
 
 ArgumentsProcessor.DEFAULTS = {
   logFileName: 'v8.log',
-  snapshotLogFileName: null,
   platform: 'unix',
   stateFilter: null,
   callGraphSize: 5,